create_mp_ring_vdev(void)
{
int i;
- uint8_t portid;
+ uint16_t portid;
struct pdump_tuples *pt = NULL;
struct rte_mempool *mbuf_pool = NULL;
char vdev_args[SIZE];
cmdline_fixed_string_t show;
cmdline_fixed_string_t bonding;
cmdline_fixed_string_t config;
- uint8_t port_id;
+ portid_t port_id;
};
static void cmd_show_bonding_config_parsed(void *parsed_result,
{
struct cmd_show_bonding_config_result *res = parsed_result;
int bonding_mode, agg_mode;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ portid_t slaves[RTE_MAX_ETHPORTS];
int num_slaves, num_active_slaves;
int primary_id;
int i;
cmdline_fixed_string_t vf;
cmdline_fixed_string_t vlan;
cmdline_fixed_string_t stripq;
- uint8_t port_id;
+ portid_t port_id;
uint16_t vf_id;
cmdline_fixed_string_t on_off;
};
rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
if (rc != 0) {
- printf("Failed to retrieve information for port: %hhu, "
+ printf("Failed to retrieve information for port: %u, "
"RX queue: %hu\nerror desc: %s(%d)\n",
port_id, queue_id, strerror(-rc), rc);
return;
rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
if (rc != 0) {
- printf("Failed to retrieve information for port: %hhu, "
+ printf("Failed to retrieve information for port: %u, "
"TX queue: %hu\nerror desc: %s(%d)\n",
port_id, queue_id, strerror(-rc), rc);
return;
struct timespec timestamp = {0, 0};
if (rte_eth_timesync_read_rx_timestamp(pi, &timestamp, index) < 0) {
- printf("Port %u RX timestamp registers not valid\n",
- (unsigned) pi);
+ printf("Port %u RX timestamp registers not valid\n", pi);
return;
}
printf("Port %u RX timestamp value %lu s %lu ns\n",
- (unsigned) pi, timestamp.tv_sec, timestamp.tv_nsec);
+ pi, timestamp.tv_sec, timestamp.tv_nsec);
}
#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */
if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) {
printf("Port %u TX timestamp registers not valid after "
"%u micro-seconds\n",
- (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS);
+ pi, MAX_TX_TMST_WAIT_MICROSECS);
return;
}
printf("Port %u TX timestamp value %lu s %lu ns validated after "
"%u micro-second%s\n",
- (unsigned) pi, timestamp.tv_sec, timestamp.tv_nsec, wait_us,
+ pi, timestamp.tv_sec, timestamp.tv_nsec, wait_us,
(wait_us == 1) ? "" : "s");
}
if (eth_type == ETHER_TYPE_1588) {
printf("Port %u Received PTP packet not filtered"
" by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
} else {
printf("Port %u Received non PTP packet type=0x%4x "
"len=%u\n",
- (unsigned) fs->rx_port, eth_type,
+ fs->rx_port, eth_type,
(unsigned) mb->pkt_len);
}
rte_pktmbuf_free(mb);
if (eth_type != ETHER_TYPE_1588) {
printf("Port %u Received NON PTP packet incorrectly"
" detected by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
rte_pktmbuf_free(mb);
return;
}
if (ptp_hdr->version != 0x02) {
printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
" protocol version 0x%x (should be 0x02)\n",
- (unsigned) fs->rx_port, ptp_hdr->version);
+ fs->rx_port, ptp_hdr->version);
rte_pktmbuf_free(mb);
return;
}
if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) {
printf("Port %u Received PTP V2 Ethernet frame with unexpected"
" message ID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n",
- (unsigned) fs->rx_port, ptp_hdr->msg_id);
+ fs->rx_port, ptp_hdr->msg_id);
rte_pktmbuf_free(mb);
return;
}
printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
/*
* Check that the received PTP packet has been timestamped by the
if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
printf("Port %u Received PTP packet not timestamped"
" by hardware\n",
- (unsigned) fs->rx_port);
+ fs->rx_port);
rte_pktmbuf_free(mb);
return;
}
mb->ol_flags |= PKT_TX_IEEE1588_TMST;
fs->tx_packets += 1;
if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
- printf("Port %u sent PTP packet dropped\n",
- (unsigned) fs->rx_port);
+ printf("Port %u sent PTP packet dropped\n", fs->rx_port);
fs->fwd_dropped += 1;
rte_pktmbuf_free(mb);
return;
if (!strcmp(lgopts[opt_idx].name, "nb-ports")) {
n = atoi(optarg);
if (n > 0 && n <= nb_ports)
- nb_fwd_ports = (uint8_t) n;
+ nb_fwd_ports = (portid_t) n;
else
rte_exit(EXIT_FAILURE,
"Invalid port %d\n", n);
*/
if (verbose_level > 0)
printf("port %u/queue %u: received %u packets\n",
- (unsigned) fs->rx_port,
+ fs->rx_port,
(unsigned) fs->rx_queue,
(unsigned) nb_rx);
for (i = 0; i < nb_rx; i++) {
/* Forward function declarations */
-static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
+static void map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
-static int eth_event_callback(uint8_t port_id,
+static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ portid_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("Checking link statuses...\n");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. speed %u Mbps- %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
/* This function is used by the interrupt thread */
static int
-eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
static const char * const event_desc[] = {
main(int argc, char** argv)
{
int diag;
- uint8_t port_id;
+ portid_t port_id;
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
#define UMA_NO_CONFIG 0xFF
typedef uint8_t lcoreid_t;
-typedef uint8_t portid_t;
+typedef uint16_t portid_t;
typedef uint16_t queueid_t;
typedef uint16_t streamid_t;
#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */
struct queue_stats_mappings {
- uint8_t port_id;
+ portid_t port_id;
uint16_t queue_id;
uint8_t stats_counter_id;
} __rte_cache_aligned;
done by the EAL and not by the ``ethdev`` layer anymore. Users relying on this
flag being present only have to remove their checks to follow the change.
-* ABI/API changes are planned for 17.11 in all structures which include port_id
- definition such as "rte_eth_dev_data", "rte_port_ethdev_reader_params",
- "rte_port_ethdev_writer_params", and so on. The definition of port_id will be
- changed from 8 bits to 16 bits in order to support more than 256 ports in
- DPDK. All APIs which have port_id parameter will be changed at the same time.
-
* ethdev: An ABI change is planned for 17.11 for the structure rte_eth_dev_data.
The size of the unique name will increase RTE_ETH_NAME_MAX_LEN from 32 to
64 characters to allow using a globally unique identifier (GUID) in this field.
Also, make sure to start the actual text at the margin.
=========================================================
+* **Extended port_id range from uint8_t to uint16_t.**
+
+ Increased the port_id range from 8 bits to 16 bits in order to support more
+ than 256 ports in DPDK. All ethdev APIs which take a port_id parameter were
+ updated at the same time.
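+
+ For example, application code that scans all ports must now use a 16-bit
+ index: an 8-bit counter can never exceed 255, so the loop below would
+ never terminate once RTE_MAX_ETHPORTS exceeds 255. A minimal sketch of
+ such application code (illustrative only, using the public
+ ``rte_eth_dev_is_valid_port()`` API):
+
+ .. code-block:: c
+
+    #include <stdio.h>
+    #include <rte_ethdev.h>
+
+    /* Illustrative application helper, not part of this patch. */
+    static void count_valid_ports(void)
+    {
+            uint16_t pid;           /* was: uint8_t pid */
+            unsigned int valid = 0;
+
+            for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++)
+                    if (rte_eth_dev_is_valid_port(pid))
+                            valid++;
+            printf("%u valid ports\n", valid);
+    }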
+
* **nfp: Added PF support.**
Previously, Netronome's NFP PMD supported only VFs. PF support is
Also, make sure to start the actual text at the margin.
=========================================================
+* **Extended port_id range.**
+
+ The size of the field ``port_id`` in the ``rte_eth_dev_data`` structure
+ has changed, as described in the `New Features` section.
Shared Library Versions
-----------------------
.. code-block:: diff
librte_acl.so.2
- librte_bitratestats.so.1
+ librte_bitratestats.so.2
librte_cfgfile.so.2
librte_cmdline.so.2
librte_cryptodev.so.3
librte_distributor.so.1
librte_eal.so.5
- librte_ethdev.so.7
+ librte_ethdev.so.8
librte_eventdev.so.2
librte_gro.so.1
librte_hash.so.2
librte_meter.so.1
librte_metrics.so.1
librte_net.so.1
- librte_pdump.so.1
+ librte_pdump.so.2
librte_pipeline.so.3
- librte_pmd_bnxt.so.1
- librte_pmd_bond.so.1
- librte_pmd_i40e.so.1
- librte_pmd_ixgbe.so.1
+ librte_pmd_bnxt.so.2
+ librte_pmd_bond.so.2
+ librte_pmd_i40e.so.2
+ librte_pmd_ixgbe.so.2
librte_pmd_ring.so.2
- librte_pmd_vhost.so.1
+ librte_pmd_vhost.so.2
librte_port.so.3
librte_power.so.1
librte_reorder.so.1
unsigned int framenum;
struct rte_mempool *mb_pool;
- uint8_t in_port;
+ uint16_t in_port;
volatile unsigned long rx_pkts;
volatile unsigned long err_pkts;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
status = eth_ark_tx_queue_stop(dev, i);
if (status != 0) {
- uint8_t port = dev->data->port_id;
+ uint16_t port = dev->data->port_id;
PMD_DRV_LOG(ERR,
"tx_queue stop anomaly"
" port %u, queue %u\n",
struct rte_eth_dev_data *dev_data;
/**< Back pointer to ethernet device data */
volatile uint32_t flags; /**< Device operational flags */
- uint8_t port_id; /**< Ethernet port identifier */
+ uint16_t port_id; /**< Ethernet port identifier */
struct rte_mempool *pool; /**< pkt mbuf mempool */
unsigned int guest_mbuf_size; /**< local pool mbuf size */
unsigned int host_mbuf_size; /**< host mbuf size */
uint16_t rx_cq_head; /**< Index of current rcq bd. */
uint16_t rx_cq_tail; /**< Index of last rcq bd. */
uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
};
uint16_t nb_tx_avail; /**< Number of TX descriptors available. */
uint16_t nb_tx_pages; /**< number of TX pages */
uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data */
};
EXPORT_MAP := rte_pmd_bnxt_version.map
-LIBABIVER := 1
+LIBABIVER := 2
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
- uint8_t port_id;
+ uint16_t port_id;
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
if (link->link_status)
RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
- (uint8_t)(eth_dev->data->port_id),
+ eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
RTE_LOG(INFO, PMD, "Port %d Link Down\n",
- (uint8_t)(eth_dev->data->port_id));
+ eth_dev->data->port_id);
}
static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
uint16_t rx_free_thresh; /* max free RX desc to hold */
uint16_t queue_id; /* RX queue index */
uint16_t reg_idx; /* RX queue register index */
- uint8_t port_id; /* Device port identifier */
+ uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
struct bnxt *bp;
uint16_t tx_next_rs; /* next desc to set RS bit */
uint16_t queue_id; /* TX queue index */
uint16_t reg_idx; /* TX queue register index */
- uint8_t port_id; /* Device port identifier */
+ uint16_t port_id; /* Device port identifier */
uint8_t pthresh; /* Prefetch threshold register */
uint8_t hthresh; /* Host threshold register */
uint8_t wthresh; /* Write-back threshold reg */
true : false;
}
-int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
vnic->bd_stall = !(*on);
}
-int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
return rc;
}
-int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
return rc;
}
-int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *eth_dev;
return rc;
}
-int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
return rc;
}
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
}
int
-rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
return rc;
}
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on)
{
struct rte_eth_dev *dev;
return rc;
}
-int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on)
{
struct bnxt_vlan_table_entry *ve;
return rc;
}
-int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats)
{
return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats);
}
-int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
uint16_t vf_id)
{
struct rte_eth_dev *dev;
return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id);
}
-int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id)
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
return bnxt_vf_vnic_count(bp, vf_id);
}
-int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
uint64_t *count)
{
struct rte_eth_dev *dev;
count);
}
-int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr,
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
uint32_t vf_id)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id)
{
struct rte_eth_dev *dev;
return rc;
}
-int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Set the VF MAC address.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr);
/**
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable vf vlan insert
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id);
/**
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on);
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on);
/**
* set all queues drop enable bit
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on);
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on);
/**
* Set the VF rate limit.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
/**
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_get_vf_stats(uint8_t port,
+int rte_pmd_bnxt_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_reset_vf_stats(uint8_t port,
+int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
uint16_t vf_id);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Set RX L2 Filtering mode of a VF of an Ethernet device.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on);
/**
* - (-ENOMEM) on an allocation failure
* - (-1) firmware interface error
*/
-int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id);
/**
* Queries the TX drop counter for the function
* - (-EINVAL) invalid vf_id specified.
* - (-ENOTSUP) Ethernet device is not a PF
*/
-int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
uint64_t *count);
/**
* - (-ENOTSUP) Ethernet device is not a PF
* - (-ENOMEM) on an allocation failure
*/
-int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
uint32_t vf_id);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on);
#endif /* _PMD_BNXT_H_ */
EXPORT_MAP := rte_pmd_bond_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Remove a slave rte_eth_dev device from the bonded device
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Set link bonding mode of bonded device
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode);
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode);
/**
* Get link bonding mode of bonded device
* link bonding mode on success, negative value otherwise
*/
int
-rte_eth_bond_mode_get(uint8_t bonded_port_id);
+rte_eth_bond_mode_get(uint16_t bonded_port_id);
/**
* Set slave rte_eth_dev as primary slave of bonded device
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id);
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id);
/**
* Get primary slave of bonded device
* Port Id of primary slave on success, -1 on failure
*/
int
-rte_eth_bond_primary_get(uint8_t bonded_port_id);
+rte_eth_bond_primary_get(uint16_t bonded_port_id);
/**
* Populate an array with list of the slaves port id's of the bonded device
* negative value otherwise
*/
int
-rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len);
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
/**
* Populate an array with list of the active slaves port id's of the bonded
* negative value otherwise
*/
int
-rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
- uint8_t len);
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len);
/**
* Set explicit MAC address to use on bonded device and it's slaves.
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
struct ether_addr *mac_addr);
/**
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_mac_address_reset(uint8_t bonded_port_id);
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id);
/**
* Set the transmit policy for bonded device to use when it is operating in
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy);
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy);
/**
* Get the transmit policy set on bonded device for balance mode operation
* Balance transmit policy on success, negative value otherwise.
*/
int
-rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id);
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id);
/**
* Set the link monitoring frequency (in ms) for monitoring the link status of
*/
int
-rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms);
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms);
/**
* Get the current link monitoring frequency (in ms) for monitoring of the link
* Monitoring interval on success, negative value otherwise.
*/
int
-rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id);
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id);
/**
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
/**
* Get the period in milliseconds set for delaying the disabling of a bonded
* Delay period on success, negative value otherwise.
*/
int
-rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id);
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id);
/**
* Set the period in milliseconds for delaying the enabling of a bonded link
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms);
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms);
/**
* Get the period in milliseconds set for delaying the enabling of a bonded
* Delay period on success, negative value otherwise.
*/
int
-rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id);
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id);
#ifdef __cplusplus
}
static void
-show_warnings(uint8_t slave_id)
+show_warnings(uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
uint8_t warnings;
* @param port Port on which LACPDU was received.
*/
static void
-rx_machine(struct bond_dev_private *internals, uint8_t slave_id,
+rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
struct lacpdu *lacp)
{
struct port *agg, *port = &mode_8023ad_ports[slave_id];
* @param port Port to handle state machine.
*/
static void
-periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
+periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
/* Calculate if either site is LACP enabled */
* @param port Port to handle state machine.
*/
static void
-mux_machine(struct bond_dev_private *internals, uint8_t slave_id)
+mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *port = &mode_8023ad_ports[slave_id];
* @param port
*/
static void
-tx_machine(struct bond_dev_private *internals, uint8_t slave_id)
+tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *agg, *port = &mode_8023ad_ports[slave_id];
-selection_logic(struct bond_dev_private *internals, uint8_t slave_id)
+selection_logic(struct bond_dev_private *internals, uint16_t slave_id)
{
struct port *agg, *port;
- uint8_t slaves_count, new_agg_id, i, j = 0;
- uint8_t *slaves;
+ uint16_t slaves_count, new_agg_id, i, j = 0;
+ uint16_t *slaves;
uint64_t agg_bandwidth[8] = {0};
uint64_t agg_count[8] = {0};
- uint8_t default_slave = 0;
+ uint16_t default_slave = 0;
uint8_t mode_count_id, mode_band_id;
struct rte_eth_link link_info;
}
void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev,
+ uint16_t slave_id)
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
memcpy(&port->actor, &initial, sizeof(struct port_params));
/* Standard requires that port ID must be greater than 0.
* Add 1 to get the corresponding port_number */
- port->actor.port_number = rte_cpu_to_be_16((uint16_t)slave_id + 1);
+ port->actor.port_number = rte_cpu_to_be_16(slave_id + 1);
memcpy(&port->partner, &initial, sizeof(struct port_params));
int
bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
- uint8_t slave_id)
+ uint16_t slave_id)
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
void *pkt = NULL;
struct port *port;
- uint8_t i;
+ uint16_t i;
/* Given slave must be in active list */
RTE_ASSERT(find_slave_by_id(internals->active_slaves,
struct bond_dev_private *internals = bond_dev->data->dev_private;
struct ether_addr slave_addr;
struct port *slave, *agg_slave;
- uint8_t slave_id, i, j;
+ uint16_t slave_id, i, j;
bond_mode_8023ad_stop(bond_dev);
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint8_t slave_id, struct rte_mbuf *pkt)
+ uint16_t slave_id, struct rte_mbuf *pkt)
{
struct mode8023ad_private *mode4 = &internals->mode4;
struct port *port = &mode_8023ad_ports[slave_id];
}
int
-rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
}
int
-rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
enum rte_bond_8023ad_agg_selection agg_selection)
{
struct rte_eth_dev *bond_dev;
return 0;
}
-int rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id)
+int rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id)
{
struct rte_eth_dev *bond_dev;
struct bond_dev_private *internals;
static int
-bond_8023ad_setup_validate(uint8_t port_id,
+bond_8023ad_setup_validate(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
if (valid_bonded_port_id(port_id) != 0)
return 0;
}
+
int
-rte_eth_bond_8023ad_setup(uint8_t port_id,
+rte_eth_bond_8023ad_setup(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf)
{
struct rte_eth_dev *bond_dev;
int
-rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
struct rte_eth_bond_8023ad_slave_info *info)
{
struct rte_eth_dev *bond_dev;
}
static int
-bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id)
+bond_8023ad_ext_validate(uint16_t port_id, uint16_t slave_id)
{
struct rte_eth_dev *bond_dev;
struct bond_dev_private *internals;
}
int
-rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled)
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled)
{
struct port *port;
int res;
}
int
-rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled)
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled)
{
struct port *port;
int res;
}
int
-rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id)
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id)
{
struct port *port;
int err;
}
int
-rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id)
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id)
{
struct port *port;
int err;
}
int
-rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
struct rte_mbuf *lacp_pkt)
{
struct port *port;
}
int
-rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port)
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port)
{
int retval = 0;
struct rte_eth_dev *dev = &rte_eth_devices[port];
}
int
-rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port)
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port)
{
int retval = 0;
struct rte_eth_dev *dev = &rte_eth_devices[port];
#define MARKER_TLV_TYPE_INFO 0x01
#define MARKER_TLV_TYPE_RESP 0x02
-typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint8_t slave_id,
+typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint16_t slave_id,
struct rte_mbuf *lacp_pkt);
enum rte_bond_8023ad_selection {
struct port_params actor;
uint8_t partner_state;
struct port_params partner;
- uint8_t agg_port_id;
+ uint16_t agg_port_id;
};
/**
* -EINVAL if conf is NULL
*/
int
-rte_eth_bond_8023ad_conf_get(uint8_t port_id,
+rte_eth_bond_8023ad_conf_get(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
/**
* -EINVAL if configuration is invalid.
*/
int
-rte_eth_bond_8023ad_setup(uint8_t port_id,
+rte_eth_bond_8023ad_setup(uint16_t port_id,
struct rte_eth_bond_8023ad_conf *conf);
/**
* bonded device or is not inactive).
*/
int
-rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
struct rte_eth_bond_8023ad_slave_info *conf);
#ifdef __cplusplus
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled);
+rte_eth_bond_8023ad_ext_collect(uint16_t port_id, uint16_t slave_id,
+ int enabled);
/**
* Get COLLECTING flag from slave port actor state.
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id);
+rte_eth_bond_8023ad_ext_collect_get(uint16_t port_id, uint16_t slave_id);
/**
* Configure a slave port to start distributing.
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled);
+rte_eth_bond_8023ad_ext_distrib(uint16_t port_id, uint16_t slave_id,
+ int enabled);
/**
* Get DISTRIBUTING flag from slave port actor state.
* -EINVAL if slave is not valid.
*/
int
-rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id);
+rte_eth_bond_8023ad_ext_distrib_get(uint16_t port_id, uint16_t slave_id);
/**
* LACPDU transmit path for external 802.3ad state machine. Caller retains
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id,
+rte_eth_bond_8023ad_ext_slowtx(uint16_t port_id, uint16_t slave_id,
struct rte_mbuf *lacp_pkt);
/**
* 0 on success, negative value otherwise.
*/
int
-rte_eth_bond_8023ad_dedicated_queues_enable(uint8_t port_id);
+rte_eth_bond_8023ad_dedicated_queues_enable(uint16_t port_id);
/**
* Disable slow queue on slaves
*
*/
int
-rte_eth_bond_8023ad_dedicated_queues_disable(uint8_t port_id);
+rte_eth_bond_8023ad_dedicated_queues_disable(uint16_t port_id);
/*
* Get aggregator mode for 8023ad
* agregator mode on success, negative value otherwise
*/
int
-rte_eth_bond_8023ad_agg_selection_get(uint8_t port_id);
+rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
/**
* Set aggregator mode for 8023ad
* 0 on success, negative value otherwise
*/
int
-rte_eth_bond_8023ad_agg_selection_set(uint8_t port_id,
+rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
enum rte_bond_8023ad_agg_selection agg_selection);
#endif /* RTE_ETH_BOND_8023AD_H_ */
*/
void
bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint8_t slave_id, struct rte_mbuf *pkt);
+ uint16_t slave_id, struct rte_mbuf *pkt);
/**
* @internal
* 0 on success, negative value otherwise.
*/
void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint8_t port_id);
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
/**
* @internal
* 0 on success, negative value otherwise.
*/
int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint8_t slave_pos);
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos);
/**
* Updates state when MAC was changed on bonded device or one of its slaves.
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
- uint8_t slave_port);
+ uint16_t slave_port);
int
-bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port);
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port);
int
-bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id);
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id);
#endif /* RTE_ETH_BOND_8023AD_H_ */
rte_spinlock_unlock(&internals->mode6.lock);
}
-uint8_t
+uint16_t
bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
struct bond_dev_private *internals)
{
return internals->current_primary_port;
}
-uint8_t
+uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
struct rte_mbuf *pkt, struct bond_dev_private *internals)
{
struct ether_hdr *eth_h;
struct arp_hdr *arp_h;
- uint8_t slave_idx;
+ uint16_t slave_idx;
rte_spinlock_lock(&internals->mode6.lock);
eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
uint32_t cli_ip;
/**< Client IP address */
- uint8_t slave_idx;
+ uint16_t slave_idx;
/**< Index of slave on which we connect with that client */
uint8_t in_use;
/**< Flag indicating if entry in client table is currently used */
* @return
* Index of slave on which packet should be sent.
*/
-uint8_t
+uint16_t
bond_mode_alb_arp_xmit(struct ether_hdr *eth_h, uint16_t offset,
struct bond_dev_private *internals);
* @return
* Index of slave on which packet should be sent.
*/
-uint8_t
+uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
struct rte_mbuf *pkt, struct bond_dev_private *internals);
}
int
-valid_bonded_port_id(uint8_t port_id)
+valid_bonded_port_id(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}
int
-valid_slave_port_id(uint8_t port_id, uint8_t mode)
+valid_slave_port_id(uint16_t port_id, uint8_t mode)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
}
void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
+ uint16_t active_count = internals->active_slave_count;
}
void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id)
{
- uint8_t slave_pos;
+ uint16_t slave_pos;
struct bond_dev_private *internals = eth_dev->data->dev_private;
- uint8_t active_count = internals->active_slave_count;
+ uint16_t active_count = internals->active_slave_count;
if (internals->mode == BONDING_MODE_8023AD) {
bond_mode_8023ad_stop(eth_dev);
{
struct bond_dev_private *internals;
char devargs[52];
- uint8_t port_id;
+ uint16_t port_id;
int ret;
if (name == NULL) {
}
static int
-slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
}
static int
-__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
struct bond_dev_private *internals;
}
int
-rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_slave_add(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
}
static int
-__eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
+__eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
+ uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
}
int
-rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_slave_remove(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
}
int
-rte_eth_bond_mode_set(uint8_t bonded_port_id, uint8_t mode)
+rte_eth_bond_mode_set(uint16_t bonded_port_id, uint8_t mode)
{
if (valid_bonded_port_id(bonded_port_id) != 0)
return -1;
}
int
-rte_eth_bond_mode_get(uint8_t bonded_port_id)
+rte_eth_bond_mode_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+rte_eth_bond_primary_set(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_primary_get(uint8_t bonded_port_id)
+rte_eth_bond_primary_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
+rte_eth_bond_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
{
struct bond_dev_private *internals;
- uint8_t i;
+ uint16_t i;
}
int
-rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
- uint8_t len)
+rte_eth_bond_active_slaves_get(uint16_t bonded_port_id, uint16_t slaves[],
+ uint16_t len)
{
struct bond_dev_private *internals;
if (internals->active_slave_count > len)
return -1;
- memcpy(slaves, internals->active_slaves, internals->active_slave_count);
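+ /* active_slaves[] entries are now 16 bits wide, so the copy length
+ * must be a byte count (elements * element size), not the bare
+ * element count. */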
+ memcpy(slaves, internals->active_slaves,
+ internals->active_slave_count * sizeof(internals->active_slaves[0]));
return internals->active_slave_count;
}
int
-rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
+rte_eth_bond_mac_address_set(uint16_t bonded_port_id,
struct ether_addr *mac_addr)
{
struct rte_eth_dev *bonded_eth_dev;
}
int
-rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
+rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
}
int
-rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
+rte_eth_bond_xmit_policy_set(uint16_t bonded_port_id, uint8_t policy)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
+rte_eth_bond_xmit_policy_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
+rte_eth_bond_link_monitoring_set(uint16_t bonded_port_id, uint32_t internal_ms)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
+rte_eth_bond_link_monitoring_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+rte_eth_bond_link_down_prop_delay_set(uint16_t bonded_port_id,
+ uint32_t delay_ms)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
+rte_eth_bond_link_down_prop_delay_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
+rte_eth_bond_link_up_prop_delay_set(uint16_t bonded_port_id, uint32_t delay_ms)
{
struct bond_dev_private *internals;
}
int
-rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
+rte_eth_bond_link_up_prop_delay_get(uint16_t bonded_port_id)
{
struct bond_dev_private *internals;
return -1;
} else
slave_ports->slaves[slave_ports->slave_count++] =
- (uint8_t)port_id;
+ port_id;
}
return 0;
}
int
bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
- uint8_t slave_port) {
+ uint16_t slave_port) {
struct rte_eth_dev_info slave_info;
struct rte_flow_error error;
struct bond_dev_private *internals = (struct bond_dev_private *)
}
int
-bond_8023ad_slow_pkt_hw_filter_supported(uint8_t port_id) {
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id) {
struct rte_eth_dev *bond_dev = &rte_eth_devices[port_id];
struct bond_dev_private *internals = (struct bond_dev_private *)
(bond_dev->data->dev_private);
struct rte_eth_dev_info bond_info;
- uint8_t idx;
+ uint16_t idx;
/* Verify if all slaves in bonding supports flow director and */
if (internals->slave_count > 0) {
}
int
-bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint8_t slave_port) {
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port) {
struct rte_flow_error error;
struct bond_dev_private *internals = (struct bond_dev_private *)
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
struct bond_dev_private *internals = bd_rx_q->dev_private;
uint16_t num_rx_total = 0; /* Total number of received packets */
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count;
- uint8_t i, idx;
+ uint16_t i, idx;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* positions in slaves, not ID */
uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
uint8_t distributing_count;
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count, idx;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slave_count, idx;
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts];
uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t num_tx_total = 0, num_tx_slave;
}
static void
-bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
+bandwidth_left(uint16_t port_id, uint64_t load, uint8_t update_idx,
struct bwg_slave *bwg_slave)
{
struct rte_eth_link link_status;
struct rte_eth_dev *primary_port =
&rte_eth_devices[internals->primary_port];
uint16_t num_tx_total = 0;
- uint8_t i, j;
+ uint16_t i, j;
- uint8_t num_of_slaves = internals->active_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves = internals->active_slave_count;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
struct ether_hdr *ether_hdr;
struct ether_addr primary_slave_addr;
uint16_t num_send, num_not_send = 0;
uint16_t num_tx_total = 0;
- uint8_t slave_idx;
+ uint16_t slave_idx;
int i, j;
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
struct bond_dev_private *internals;
struct bond_tx_queue *bd_tx_q;
- uint8_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t num_of_slaves;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* positions in slaves, not ID */
uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
uint8_t distributing_count;
struct bond_tx_queue *bd_tx_q;
- uint8_t tx_failed_flag = 0, num_of_slaves;
+ uint8_t tx_failed_flag = 0;
+ uint16_t num_of_slaves;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t max_nb_of_tx_pkts = 0;
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint8_t slave_port_id)
+ uint16_t slave_port_id)
{
int i;
bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
int res;
- uint8_t i;
+ uint16_t i;
struct bond_dev_private *internals = dev->data->dev_private;
/* don't do this while a slave is being added */
rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
for (i = 0; i < internals->slave_count; i++) {
- uint8_t port_id = internals->slaves[i].port_id;
+ uint16_t port_id = internals->slaves[i].port_id;
res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
if (res == ENOTSUP)
static int
bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
{
- void (*link_update)(uint8_t port_id, struct rte_eth_link *eth_link);
+ void (*link_update)(uint16_t port_id, struct rte_eth_link *eth_link);
struct bond_dev_private *bond_ctx;
struct rte_eth_link slave_link;
}
int
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
void *param, void *ret_param __rte_unused)
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_kvargs *kvlist = internals->kvlist;
int arg_count;
- uint8_t port_id = dev - rte_eth_devices;
+ uint16_t port_id = dev - rte_eth_devices;
uint8_t agg_mode;
static const uint8_t default_rss_key[40] = {
/* Parse/set primary slave port id*/
arg_count = rte_kvargs_count(kvlist, PMD_BOND_PRIMARY_SLAVE_KVARG);
if (arg_count == 1) {
- uint8_t primary_slave_port_id;
+ uint16_t primary_slave_port_id;
if (rte_kvargs_process(kvlist,
PMD_BOND_PRIMARY_SLAVE_KVARG,
}
/* Set balance mode transmit policy*/
- if (rte_eth_bond_primary_set(port_id, (uint8_t)primary_slave_port_id)
+ if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
!= 0) {
RTE_LOG(ERR, EAL,
"Failed to set primary slave port %d on bonded device %s\n",
/** Bonded slave devices structure */
struct bond_ethdev_slave_ports {
- uint8_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
- uint8_t slave_count; /**< Number of slaves */
+ uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
+ uint16_t slave_count; /**< Number of slaves */
};
struct bond_slave_details {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t link_status_poll_enabled;
uint8_t link_status_wait_to_complete;
/** Link Bonding PMD device private configuration Structure */
struct bond_dev_private {
- uint8_t port_id; /**< Port Id of Bonded Port */
+ uint16_t port_id; /**< Port Id of Bonded Port */
uint8_t mode; /**< Link Bonding Mode */
rte_spinlock_t lock;
- uint8_t primary_port; /**< Primary Slave Port */
- uint8_t current_primary_port; /**< Primary Slave Port */
- uint8_t user_defined_primary_port;
+ uint16_t primary_port; /**< Primary Slave Port */
+ uint16_t current_primary_port; /**< Primary Slave Port */
+ uint16_t user_defined_primary_port;
/**< Flag for whether primary port is user defined or not */
uint8_t balance_xmit_policy;
uint16_t nb_rx_queues; /**< Total number of rx queues */
uint16_t nb_tx_queues; /**< Total number of tx queues*/
- uint8_t active_slave; /**< Next active_slave to poll */
- uint8_t active_slave_count; /**< Number of active slaves */
- uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
+ uint16_t active_slave; /**< Next active_slave to poll */
+ uint16_t active_slave_count; /**< Number of active slaves */
+ uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
- uint8_t slave_count; /**< Number of bonded slaves */
+ uint16_t slave_count; /**< Number of bonded slaves */
struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
/**< Array of bonded slave details */
struct mode8023ad_private mode4;
- uint8_t tlb_slaves_order[RTE_MAX_ETHPORTS]; /* TLB active slaves send order */
+ uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
+ /**< TLB active slaves send order */
struct mode_alb_private mode6;
uint32_t rx_offload_capa; /** Rx offload capability */
/* Search given slave array to find position of given id.
* Return slave pos or slaves_count if not found. */
-static inline uint8_t
-find_slave_by_id(uint8_t *slaves, uint8_t slaves_count, uint8_t slave_id) {
+static inline uint16_t
+find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) {
- uint8_t pos;
+ uint16_t pos;
for (pos = 0; pos < slaves_count; pos++) {
if (slave_id == slaves[pos])
break;
}
int
-valid_port_id(uint8_t port_id);
+valid_port_id(uint16_t port_id);
int
-valid_bonded_port_id(uint8_t port_id);
+valid_bonded_port_id(uint16_t port_id);
int
-valid_slave_port_id(uint8_t port_id, uint8_t mode);
+valid_slave_port_id(uint16_t port_id, uint8_t mode);
void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id);
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
void
link_properties_set(struct rte_eth_dev *bonded_eth_dev,
void
bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint8_t slave_port_id);
+ uint16_t slave_port_id);
int
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
void *param, void *ret_param);
int
rte_em_dev_atomic_read_link_status(dev, &link);
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
- dev->data->port_id, (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX ?
"full-duplex" : "half-duplex");
} else {
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
/** Total number of TX descriptors ready to be allocated. */
uint16_t nb_tx_free;
uint16_t queue_id; /**< TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
/**< Index of first used TX descriptor. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold register. */
}
int
-failsafe_eth_rmv_event_callback(uint8_t port_id __rte_unused,
+failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type event __rte_unused,
void *cb_arg, void *out __rte_unused)
{
}
int
-failsafe_eth_lsc_event_callback(uint8_t port_id __rte_unused,
+failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type event __rte_unused,
void *cb_arg, void *out __rte_unused)
{
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
struct rte_eth_stats *from);
-int failsafe_eth_rmv_event_callback(uint8_t port_id,
+int failsafe_eth_rmv_event_callback(uint16_t port_id,
enum rte_eth_event_type type,
void *arg, void *out);
-int failsafe_eth_lsc_event_callback(uint8_t port_id,
+int failsafe_eth_lsc_event_callback(uint16_t port_id,
enum rte_eth_event_type event,
void *cb_arg, void *out);
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
uint16_t rxrearm_start; /* the idx we start the re-arming from */
uint16_t rx_using_sse; /* indicates that vector RX is in use */
- uint8_t port_id;
+ uint16_t port_id;
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
volatile uint32_t *tail_ptr;
uint32_t txq_flags; /* Holds flags for this TXq */
uint16_t nb_desc;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
uint16_t queue_id;
uint16_t tx_ftag_en; /* indicates FTAG TX supported */
}
static inline void
-fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint8_t in_port)
+fm10k_pktmbuf_reset(struct rte_mbuf *mb, uint16_t in_port)
{
rte_mbuf_refcnt_set(mb, 1);
mb->next = NULL;
EXPORT_MAP := rte_pmd_i40e_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# Add extra flags for base driver files (also known as shared code)
hw->adapter_stopped = 0;
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; autonegotiation disabled",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+ "Invalid link_speeds for port %u, autonegotiation disabled",
+ dev->data->port_id);
return -EINVAL;
}
uint16_t rxrearm_start; /**< the idx we start the re-arming from */
uint64_t mbuf_initializer; /**< value to init mbufs */
- uint8_t port_id; /**< device port ID */
+ uint16_t port_id; /**< device port ID */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise */
uint16_t queue_id; /**< RX queue index */
uint16_t reg_idx; /**< RX queue register index */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
uint32_t txq_flags;
#include "rte_pmd_i40e.h"
int
-rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_i40e_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr)
{
struct i40e_mac_filter *f;
/* Set vlan strip on/off for specific VF from host */
int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
uint16_t vlan_id)
{
struct rte_eth_dev *dev;
return ret;
}
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
uint8_t on)
{
struct rte_eth_dev *dev;
return ret;
}
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
return count;
}
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
uint64_t vf_mask, uint8_t on)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_get_vf_stats(uint8_t port,
+rte_pmd_i40e_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats)
{
}
int
-rte_pmd_i40e_reset_vf_stats(uint8_t port,
+rte_pmd_i40e_reset_vf_stats(uint16_t port,
uint16_t vf_id)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+rte_pmd_i40e_set_vf_max_bw(uint16_t port, uint16_t vf_id, uint32_t bw)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
}
int
-rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port, uint16_t vf_id,
uint8_t tc_num, uint8_t *bw_weight)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port, uint16_t vf_id,
uint8_t tc_no, uint32_t bw)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map)
{
struct rte_eth_dev *dev;
struct i40e_pf *pf;
/* Check if the profile info exists */
static int
-i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
{
struct rte_eth_dev *dev = &rte_eth_devices[port];
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
}
int
-rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op)
{
}
int
-rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size)
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
int
rte_pmd_i40e_ptype_mapping_update(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t count,
uint8_t exclusive)
return 0;
}
-int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port)
{
struct rte_eth_dev *dev;
}
int rte_pmd_i40e_ptype_mapping_get(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t size,
uint16_t *count,
return 0;
}
-int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
uint32_t target,
uint8_t mask,
uint32_t pkt_type)
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* invalid.
*/
-int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+int rte_pmd_i40e_ping_vfs(uint16_t port, uint16_t vf);
/**
* Enable/Disable VF MAC anti spoofing.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint16_t port,
uint16_t vf_id,
uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint16_t port,
uint16_t vf_id,
uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+int rte_pmd_i40e_set_tx_loopback(uint16_t port,
uint8_t on);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+int rte_pmd_i40e_set_vf_unicast_promisc(uint16_t port,
uint16_t vf_id,
uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+int rte_pmd_i40e_set_vf_multicast_promisc(uint16_t port,
uint16_t vf_id,
uint8_t on);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr);
/**
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable VF VLAN insert.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_vlan_insert(uint16_t port, uint16_t vf_id,
uint16_t vlan_id);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+int rte_pmd_i40e_set_vf_broadcast(uint16_t port, uint16_t vf_id,
uint8_t on);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+int rte_pmd_i40e_set_vf_vlan_tag(uint16_t port, uint16_t vf_id, uint8_t on);
/**
* Enable/Disable VF VLAN filter
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+int rte_pmd_i40e_set_vf_vlan_filter(uint16_t port, uint16_t vlan_id,
uint64_t vf_mask, uint8_t on);
/**
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_get_vf_stats(uint8_t port,
+int rte_pmd_i40e_get_vf_stats(uint16_t port,
uint16_t vf_id,
struct rte_eth_stats *stats);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+int rte_pmd_i40e_reset_vf_stats(uint16_t port,
uint16_t vf_id);
/**
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
+int rte_pmd_i40e_set_vf_max_bw(uint16_t port,
uint16_t vf_id,
uint32_t bw);
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint16_t port,
uint16_t vf_id,
uint8_t tc_num,
uint8_t *bw_weight);
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
+int rte_pmd_i40e_set_vf_tc_max_bw(uint16_t port,
uint16_t vf_id,
uint8_t tc_no,
uint32_t bw);
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
+int rte_pmd_i40e_set_tc_strict_prio(uint16_t port, uint8_t tc_map);
/**
* Load/Unload a DDP package.
* - (-EACCES) if profile does not exist.
* - (-ENOTSUP) if operation not supported.
*/
-int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+int rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
uint32_t size,
enum rte_pmd_i40e_package_op op);
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
+int rte_pmd_i40e_get_ddp_list(uint16_t port, uint8_t *buff, uint32_t size);
/**
* Update hardware defined ptype to software defined packet type
* set all other PTYPE mappings to RTE_PTYPE_UNKNOWN.
*/
int rte_pmd_i40e_ptype_mapping_update(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t count,
uint8_t exclusive);
* @param port
* port identifier of the device
*/
-int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
+int rte_pmd_i40e_ptype_mapping_reset(uint16_t port);
/**
* Get hardware defined ptype to software defined ptype
* -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN.
*/
int rte_pmd_i40e_ptype_mapping_get(
- uint8_t port,
+ uint16_t port,
struct rte_pmd_i40e_ptype_mapping *mapping_items,
uint16_t size,
uint16_t *count,
* @param pkt_type
* the new packet type to overwrite
*/
-int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+int rte_pmd_i40e_ptype_mapping_replace(uint16_t port,
uint32_t target,
uint8_t mask,
uint32_t pkt_type);
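Since every rte_pmd_i40e_* entry point above now takes a 16-bit port identifier, existing call sites keep compiling thanks to default argument promotion, but any local that caches the port must be widened as well or IDs above 255 are silently truncated. A minimal caller sketch under the new prototypes (the helper itself is hypothetical, not part of the patch):

#include <stdint.h>
#include "rte_pmd_i40e.h"

static int
ping_all_vfs(uint16_t port, uint16_t nb_vfs)
{
	uint16_t vf;

	for (vf = 0; vf < nb_vfs; vf++) {
		/* The port must travel as uint16_t end to end; storing
		 * it in a uint8_t local would truncate IDs above 255. */
		int ret = rte_pmd_i40e_ping_vfs(port, vf);

		if (ret != 0)
			return ret;
	}
	return 0;
}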
EXPORT_MAP := rte_pmd_ixgbe_version.map
-LIBABIVER := 1
+LIBABIVER := 2
ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
* - fixed speed: TODO implement
*/
if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
- PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
- dev->data->port_id);
+ PMD_INIT_LOG(ERR,
+		"Invalid link_speeds for port %u, fixed speed not supported",
+ dev->data->port_id);
return -EINVAL;
}
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
uint16_t pkt_type_mask; /**< Packet type mask for different NICs. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint8_t rx_deferred_start; /**< not in global dev start. */
uint16_t tx_next_rs; /**< next desc to set RS bit */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx; /**< TX queue register index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
#include "rte_pmd_ixgbe.h"
int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr)
{
struct ixgbe_hw *hw;
}
int
-rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf)
{
struct ixgbe_hw *hw;
struct ixgbe_vf_info *vfinfo;
}
int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
struct ixgbe_mac_info *mac;
}
int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
struct ixgbe_mac_info *mac;
}
int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id)
{
struct ixgbe_hw *hw;
uint32_t ctrl;
}
int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t ctrl;
}
int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t reg_value;
}
int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on)
{
struct ixgbe_hw *hw;
uint32_t reg_value;
}
int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
}
int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf,
uint16_t rx_mask, uint8_t on)
{
int val = 0;
}
int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
}
int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on)
{
struct rte_eth_dev *dev;
struct rte_pci_device *pci_dev;
}
int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
uint64_t vf_mask, uint8_t vlan_on)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_macsec_disable(uint8_t port)
+rte_pmd_ixgbe_macsec_disable(uint16_t port)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi)
{
struct ixgbe_hw *hw;
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key)
{
struct ixgbe_hw *hw;
}
int
-rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key)
{
struct ixgbe_hw *hw;
}
int
-rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
uint8_t tc_num,
uint8_t *bw_weight)
{
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
-rte_pmd_ixgbe_bypass_init(uint8_t port_id)
+rte_pmd_ixgbe_bypass_init(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_state_show(uint8_t port_id, uint32_t *state)
+rte_pmd_ixgbe_bypass_state_show(uint16_t port_id, uint32_t *state)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_state_set(uint8_t port_id, uint32_t *new_state)
+rte_pmd_ixgbe_bypass_state_set(uint16_t port_id, uint32_t *new_state)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_event_show(uint8_t port_id,
+rte_pmd_ixgbe_bypass_event_show(uint16_t port_id,
uint32_t event,
uint32_t *state)
{
}
int
-rte_pmd_ixgbe_bypass_event_store(uint8_t port_id,
+rte_pmd_ixgbe_bypass_event_store(uint16_t port_id,
uint32_t event,
uint32_t state)
{
}
int
-rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port_id, uint32_t timeout)
+rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port_id, uint32_t timeout)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_ver_show(uint8_t port_id, uint32_t *ver)
+rte_pmd_ixgbe_bypass_ver_show(uint16_t port_id, uint32_t *ver)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
+rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port_id, uint32_t *wd_timeout)
{
struct rte_eth_dev *dev;
}
int
-rte_pmd_ixgbe_bypass_wd_reset(uint8_t port_id)
+rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* invalid.
*/
-int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
+int rte_pmd_ixgbe_ping_vf(uint16_t port, uint16_t vf);
/**
* Set the VF MAC address.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if *vf* or *mac_addr* is invalid.
*/
-int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_ixgbe_set_vf_mac_addr(uint16_t port, uint16_t vf,
struct ether_addr *mac_addr);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf,
+ uint8_t on);
/**
* Enable/Disable VF MAC anti spoofing.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable VF VLAN insert.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+int rte_pmd_ixgbe_set_vf_vlan_insert(uint16_t port, uint16_t vf,
uint16_t vlan_id);
/**
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on);
+int rte_pmd_ixgbe_set_tx_loopback(uint16_t port, uint8_t on);
/**
* Set the drop enable bit for all queues.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on);
+int rte_pmd_ixgbe_set_all_queues_drop_en(uint16_t port, uint8_t on);
/**
* Set the drop enable bit in the VF split RX control register.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_ixgbe_set_vf_split_drop_en(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable VF VLAN strip for all queues in a pool.
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable MACsec offload.
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+int rte_pmd_ixgbe_macsec_enable(uint16_t port, uint8_t en, uint8_t rp);
/**
* Disable MACsec offload.
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+int rte_pmd_ixgbe_macsec_disable(uint16_t port);
/**
* Configure Tx SC (Secure Connection).
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+int rte_pmd_ixgbe_macsec_config_txsc(uint16_t port, uint8_t *mac);
/**
* Configure Rx SC (Secure Connection).
* - (-ENODEV) if *port* invalid.
* - (-ENOTSUP) if hardware doesn't support this feature.
*/
-int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+int rte_pmd_ixgbe_macsec_config_rxsc(uint16_t port, uint8_t *mac, uint16_t pi);
/**
* Enable Tx SA (Secure Association).
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+int rte_pmd_ixgbe_macsec_select_txsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key);
/**
* - (-ENOTSUP) if hardware doesn't support this feature.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+int rte_pmd_ixgbe_macsec_select_rxsa(uint16_t port, uint8_t idx, uint8_t an,
uint32_t pn, uint8_t *key);
/**
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on);
+rte_pmd_ixgbe_set_vf_rxmode(uint16_t port, uint16_t vf, uint16_t rx_mask,
+ uint8_t on);
/**
* Enable or disable traffic reception for a VF on an Ethernet device.
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_rx(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable or disable traffic transmission for a VF on an Ethernet device.
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+rte_pmd_ixgbe_set_vf_tx(uint16_t port, uint16_t vf, uint8_t on);
/**
* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
* - (-EINVAL) if bad parameter.
*/
int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on);
+rte_pmd_ixgbe_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on);
/**
* Set the rate limitation for a vf on an Ethernet device.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
+int rte_pmd_ixgbe_set_vf_rate_limit(uint16_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
/**
* Set all the TCs' bandwidth weight.
* - (-EINVAL) if bad parameter.
* - (-ENOTSUP) not supported by firmware.
*/
-int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
uint8_t tc_num,
uint8_t *bw_weight);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_init(uint8_t port);
+int rte_pmd_ixgbe_bypass_init(uint16_t port);
/**
* Return bypass state.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_state_show(uint8_t port, uint32_t *state);
+int rte_pmd_ixgbe_bypass_state_show(uint16_t port, uint32_t *state);
/**
* Set bypass state.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_state_set(uint8_t port, uint32_t *new_state);
+int rte_pmd_ixgbe_bypass_state_set(uint16_t port, uint32_t *new_state);
/**
* Return bypass state when given event occurs.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_event_show(uint8_t port,
+int rte_pmd_ixgbe_bypass_event_show(uint16_t port,
uint32_t event,
uint32_t *state);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_event_store(uint8_t port,
+int rte_pmd_ixgbe_bypass_event_store(uint16_t port,
uint32_t event,
uint32_t state);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_timeout_store(uint8_t port, uint32_t timeout);
+int rte_pmd_ixgbe_bypass_wd_timeout_store(uint16_t port, uint32_t timeout);
/**
* Get bypass firmware version.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_ver_show(uint8_t port, uint32_t *ver);
+int rte_pmd_ixgbe_bypass_ver_show(uint16_t port, uint32_t *ver);
/**
* Return bypass watchdog timeout in seconds
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_timeout_show(uint8_t port, uint32_t *wd_timeout);
+int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
/**
* Reset bypass watchdog timer
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_pmd_ixgbe_bypass_wd_reset(uint8_t port);
+int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
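The ixgbe-specific API gets the same widening. As a quick illustration (the flag values are placeholders, not a recommendation), enabling MACsec with encryption and replay protection on a 16-bit port now reads:

#include <stdint.h>
#include "rte_pmd_ixgbe.h"

static int
enable_macsec(uint16_t port)
{
	/* en = 1 encrypts transmitted traffic, rp = 1 enables replay
	 * protection; returns 0 or a negative errno-style value as
	 * documented above. */
	return rte_pmd_ixgbe_macsec_enable(port, 1, 1);
}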
/**
unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
unsigned int cqe_n:4; /* Log 2 of CQ elements. */
unsigned int elts_n:4; /* Log 2 of Mbufs. */
- unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
unsigned int pending_err:1; /* CQE error needs to be handled. */
- unsigned int :7; /* Remaining bits. */
+ unsigned int :15; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
+ uint16_t port_id;
uint16_t rq_ci;
uint16_t rq_pi;
uint16_t cq_ci;
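Note how the mlx5 Rx queue keeps its flags word the same width: the 8 bits freed by dropping the port_id bitfield go back into the anonymous padding (7 + 8 = 15), and the identifier reappears as a plain uint16_t beside the other 16-bit indices. A layout sketch (illustrative only, not the driver's complete struct):

#include <stdint.h>

struct rxq_sketch {
	unsigned int sges_n:2;
	unsigned int cqe_n:4;
	unsigned int elts_n:4;
	unsigned int rss_hash:1;
	unsigned int mark:1;
	unsigned int pending_err:1;
	unsigned int :15;	/* 7 spare bits + 8 reclaimed from port_id */
	uint16_t port_id;	/* widened, now outside the bitfield */
};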
nfp_net_dev_atomic_read_link_status(dev, &link);
if (link.link_status)
RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
- (int)(dev->data->port_id), (unsigned)link.link_speed,
+ dev->data->port_id, link.link_speed,
link.link_duplex == ETH_LINK_FULL_DUPLEX
? "full-duplex" : "half-duplex");
else
RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
- (int)(dev->data->port_id));
+ dev->data->port_id);
RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
pci_dev->addr.domain, pci_dev->addr.bus,
"tx_free_thresh must be less than the number of TX "
"descriptors. (tx_free_thresh=%u port=%d "
"queue=%d)\n", (unsigned int)tx_free_thresh,
- (int)dev->data->port_id, (int)queue_idx);
+ dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
- RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned)rxq->port_id,
- (unsigned)rxq->qidx);
+ RTE_LOG_DP(DEBUG, PMD,
+ "RX mbuf alloc failed port_id=%u queue_id=%u\n",
+ rxq->port_id, (unsigned int)rxq->qidx);
nfp_net_mbuf_alloc_failed(rxq);
break;
}
return nb_hold;
PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);
+ rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
nb_hold += rxq->nb_rx_hold;
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
- (unsigned)rxq->port_id, (unsigned)rxq->qidx,
+ rxq->port_id, (unsigned int)rxq->qidx,
(unsigned)nb_hold, (unsigned)avail);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
nb_hold = 0;
uint32_t tx_hthresh; /* not used by now. Future? */
uint32_t tx_wthresh; /* not used by now. Future? */
uint32_t txq_flags; /* not used by now. Future? */
- uint8_t port_id;
+ uint16_t port_id;
int qidx;
int tx_qcidx;
__le64 dma;
struct pmd_internals {
unsigned packet_size;
unsigned packet_copy;
- uint8_t port_id;
+ uint16_t port_id;
struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct pcap_rx_queue {
pcap_t *pcap;
- uint8_t in_port;
+ uint16_t in_port;
struct rte_mempool *mb_pool;
struct queue_stat rx_stat;
char name[PATH_MAX];
uint32_t speed; /* In Mb/s */
uint32_t adv_speed; /* Speed mask */
uint8_t duplex; /* In DUPLEX defs */
- uint8_t port; /* In PORT defs */
+ uint16_t port; /* In PORT defs */
bool autoneg;
uint32_t pause_config;
};
char args_str[32] = { 0 };
char ring_name[32] = { 0 };
- uint8_t port_id = RTE_MAX_ETHPORTS;
+ uint16_t port_id = RTE_MAX_ETHPORTS;
int ret;
/* do some parameter checking */
struct szedata2_rx_queue {
struct szedata *sze;
uint8_t rx_channel;
- uint8_t in_port;
+ uint16_t in_port;
struct rte_mempool *mb_pool;
volatile uint64_t rx_pkts;
volatile uint64_t rx_bytes;
uint16_t queue_id;
uint16_t precharge_cnt;
uint8_t rx_drop_en;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t rbptr_offset;
} __rte_cache_aligned;
EXPORT_MAP := rte_pmd_vhost_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
rte_atomic32_t while_queuing;
struct pmd_internal *internal;
struct rte_mempool *mb_pool;
- uint8_t port;
+ uint16_t port;
uint16_t virtqueue_id;
struct vhost_stats stats;
};
};
int
-rte_eth_vhost_get_queue_event(uint8_t port_id,
+rte_eth_vhost_get_queue_event(uint16_t port_id,
struct rte_eth_vhost_queue_event *event)
{
struct rte_vhost_vring_state *state;
}
int
-rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
+rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
struct internal_list *list;
struct rte_eth_dev *eth_dev;
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_vhost_get_queue_event(uint8_t port_id,
+int rte_eth_vhost_get_queue_event(uint16_t port_id,
struct rte_eth_vhost_queue_event *event);
/**
* - On success, the 'vid' associated with 'port_id'.
* - On failure, a negative value.
*/
-int rte_eth_vhost_get_vid_from_port_id(uint8_t port_id);
+int rte_eth_vhost_get_vid_from_port_id(uint16_t port_id);
#ifdef __cplusplus
}
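A hedged usage sketch for the widened vhost helpers (the event field names follow rte_eth_vhost.h; the draining function itself is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <rte_eth_vhost.h>

static void
drain_queue_events(uint16_t port_id)
{
	struct rte_eth_vhost_queue_event event;

	/* get_queue_event() returns 0 while events are pending and a
	 * negative value once no event remains. */
	while (rte_eth_vhost_get_queue_event(port_id, &event) == 0)
		printf("port %u vid %d: queue %u %s %s\n", port_id,
		       rte_eth_vhost_get_vid_from_port_id(port_id),
		       event.queue_id, event.rx ? "rx" : "tx",
		       event.enable ? "enabled" : "disabled");
}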
uint8_t use_msix;
uint8_t modern;
uint8_t use_simple_rxtx;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
uint16_t queue_id; /**< DPDK queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
/**< memzone to populate hdr. */
const struct rte_memzone *virtio_net_hdr_mz;
phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};
const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
uint16_t txdata_desc_size;
} vmxnet3_tx_queue_t;
const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
- uint8_t port_id; /**< Device port identifier. */
+ uint16_t port_id; /**< Device port identifier. */
} vmxnet3_rx_queue_t;
#endif /* _VMXNET3_RING_H_ */
addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
-uint8_t slaves[RTE_MAX_ETHPORTS];
-uint8_t slaves_count;
+uint16_t slaves[RTE_MAX_ETHPORTS];
+uint16_t slaves_count;
-static uint8_t BOND_PORT = 0xff;
+static uint16_t BOND_PORT = 0xffff;
static struct rte_mempool *mbuf_pool;
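The invalid-port sentinel must widen together with the type: under 8-bit IDs 0xff could never name a real port, but with 16-bit IDs port 255 is perfectly valid, so the marker becomes 0xffff. A sketch of the resulting validity check (the helper name is hypothetical):

static inline int
bond_port_configured(void)
{
	/* 0xffff is unreachable as a real ID because port numbers are
	 * bounded by RTE_MAX_ETHPORTS. */
	return BOND_PORT != 0xffff;
}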
};
static void
-slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
+slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
struct ether_addr addr;
rte_eth_macaddr_get(portid, &addr);
- printf("Port %u MAC: ", (unsigned)portid);
+ printf("Port %u MAC: ", portid);
PRINT_MAC(addr);
printf("\n");
}
rte_exit(EXIT_FAILURE,
"Faled to create bond port\n");
- BOND_PORT = (uint8_t)retval;
+ BOND_PORT = retval;
retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
if (retval != 0)
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- uint8_t slaves[16] = {0};
+ uint16_t slaves[16] = {0};
uint8_t len = 16;
struct ether_addr addr;
- uint8_t i = 0;
+ uint16_t i = 0;
while (i < slaves_count) {
rte_eth_macaddr_get(i, &addr);
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
while (!link.link_status) {
- printf("Waiting for Link up on port %"PRIu8"\n", port);
+ printf("Waiting for Link up on port %"PRIu16"\n", port);
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
if (!link.link_status) {
- printf("Link down on port %"PRIu8"\n", port);
+ printf("Link down on port %"PRIu16"\n", port);
return 0;
}
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
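Two format-string idioms appear in the converted examples: plain %u works because variadic promotion widens uint16_t to int, while PRIu16 spells out the width explicitly; both are correct. A standalone sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t port = 300;	/* representable now, impossible in uint8_t */

	printf("port %u\n", port);		/* promoted to int */
	printf("port %" PRIu16 "\n", port);	/* explicit width */
	return 0;
}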
static int
lcore_rx(struct lcore_params *p)
{
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
-	uint8_t port;
+	uint16_t port;
struct rte_mbuf *bufs[BURST_SIZE*2];
}
static inline void
-flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
+flush_all_ports(struct output_buffer *tx_buffers, uint16_t nb_ports)
{
- uint8_t outp;
+ uint16_t outp;
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
lcore_tx(struct rte_ring *in_r)
{
static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
- uint8_t port;
+ uint16_t port;
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
struct rte_ring *rx_dist_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
- uint8_t portid;
- uint8_t nb_ports_available;
+ uint16_t portid;
+ uint16_t nb_ports_available;
uint64_t t, freq;
/* catch ctrl-c so we can print on exit */
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) portid);
+ printf("Initializing port %u... done\n", portid);
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
+ rte_exit(EXIT_FAILURE, "Cannot initialize port %u\n",
portid);
}
static uint64_t output_cores_mask = 0;
/* Array storing port_id that is associated with each lcore */
-static uint8_t port_ids[RTE_MAX_LCORE];
+static uint16_t port_ids[RTE_MAX_LCORE];
/* Structure type for recording lcore-specific stats */
struct stats {
setup_port_lcore_affinities(void)
{
unsigned long i;
- uint8_t tx_port = 0;
- uint8_t rx_port = 0;
+ uint16_t tx_port = 0;
+ uint16_t rx_port = 0;
/* Setup port_ids[] array, and check masks were ok */
RTE_LCORE_FOREACH(i) {
uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
- PRINT_INFO("Initialising port %u ...", (unsigned)port);
+ PRINT_INFO("Initialising port %u ...", port);
fflush(stdout);
ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
if (ret < 0)
- FATAL_ERROR("Could not configure port%u (%d)",
- (unsigned)port, ret);
+ FATAL_ERROR("Could not configure port%u (%d)", port, ret);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
if (ret < 0)
FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
rte_eth_dev_socket_id(port),
pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_dev_start(port);
if (ret < 0)
- FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret);
+ FATAL_ERROR("Could not start port%u (%d)", port, ret);
rte_eth_promiscuous_enable(port);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
{
int ret;
unsigned i,high_port;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
/* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
struct rte_mempool *indirect_pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
#define MAX_RX_QUEUE_PER_LCORE 16
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
- uint8_t queueid, uint8_t port_in)
+ uint8_t queueid, uint16_t port_in)
{
struct rx_queue *rxq;
uint32_t i, len, next_hop;
- uint8_t port_out, ipv6;
+ uint8_t ipv6;
+ uint16_t port_out;
int32_t len2;
ipv6 = 0;
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
portid = qconf->rx_queue_list[i].portid;
RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
- (int) portid);
+ portid);
}
while (1) {
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
/* callback function to detect packet type for a queue of a port */
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
uint16_t queueid = 0;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
#ifdef RTE_LIBRTE_KNI
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
int ret = 0;
if (port_id >= rte_eth_dev_count())
}
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
int ret;
if (port_id >= rte_eth_dev_count())
struct rte_mempool *pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
struct tx_lcore_stat {
* Send burst of packets on an output interface.
*/
static inline uint32_t
-send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
uint32_t fill, len, k, n;
struct mbuf_table *txmb;
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t fill, lcore_id, len;
struct lcore_queue_conf *qconf;
}
static inline void
-reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
+reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
struct lcore_queue_conf *qconf, uint64_t tms)
{
struct ether_hdr *eth_hdr;
struct rx_queue *rxq;
void *d_addr_bytes;
uint32_t next_hop;
- uint8_t dst_port;
+ uint16_t dst_port;
rxq = &qconf->rx_queue_list[queue];
unsigned lcore_id;
uint64_t diff_tsc, cur_tsc, prev_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].portid;
- RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
+ RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
portid);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
qconf = &lcore_queue_conf[lcore];
for (i = 0; i < qconf->n_rx_queue; i++) {
- fprintf(stdout, " -- lcoreid=%u portid=%hhu "
+ fprintf(stdout, " -- lcoreid=%u portid=%u "
"frag tbl stat:\n",
lcore, qconf->rx_queue_list[i].portid);
rte_ip_frag_table_statistics_dump(stdout,
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
signal(SIGUSR1, signal_handler);
signal(SIGTERM, signal_handler);
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
-static uint8_t nb_ports = 0;
+static uint16_t nb_ports;
static int rx_queue_per_lcore = 1;
/* Send burst of packets on an output interface */
static void
-send_burst(struct lcore_queue_conf *qconf, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t port)
{
struct rte_mbuf **m_table;
uint16_t n, queueid;
*/
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
- struct lcore_queue_conf *qconf, uint8_t port)
+ struct lcore_queue_conf *qconf, uint16_t port)
{
struct ether_hdr *ethdr;
uint16_t len;
struct ipv4_hdr *iphdr;
uint32_t dest_addr, port_mask, port_num, use_clone;
int32_t hash;
- uint8_t port;
+ uint16_t port;
union {
uint64_t as_int;
struct ether_addr as_addr;
send_timeout_burst(struct lcore_queue_conf *qconf)
{
uint64_t cur_tsc;
- uint8_t portid;
+ uint16_t portid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
cur_tsc = rte_rdtsc();
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
lcore_id = rte_lcore_id();
portid = qconf->rx_queue_list[i];
RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
- lcore_id, (int) portid);
+ lcore_id, portid);
}
while (1) {
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
* Structure of port parameters
*/
struct kni_port_params {
- uint8_t port_id;/* Port ID */
+ uint16_t port_id;/* Port ID */
unsigned lcore_rx; /* lcore ID for RX */
unsigned lcore_tx; /* lcore ID for TX */
uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
-static int kni_change_mtu(uint8_t port_id, unsigned new_mtu);
-static int kni_config_network_interface(uint8_t port_id, uint8_t if_up);
+static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
+static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
static void
print_stats(void)
{
- uint8_t i;
+ uint16_t i;
printf("\n**KNI example application statistics**\n"
"====== ============== ============ ============ ============ ============\n"
static void
kni_ingress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_rx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
static void
kni_egress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_tx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
int i, j, nb_token;
char *str_fld[_NUM_FLD];
unsigned long int_fld[_NUM_FLD];
- uint8_t port_id, nb_kni_port_params = 0;
+ uint16_t port_id, nb_kni_port_params = 0;
memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
while (((p = strchr(p0, '(')) != NULL) &&
}
i = 0;
- port_id = (uint8_t)int_fld[i++];
+ port_id = int_fld[i++];
if (port_id >= RTE_MAX_ETHPORTS) {
printf("Port ID %d could not exceed the maximum %d\n",
port_id, RTE_MAX_ETHPORTS);
/* Callback for request of changing MTU */
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
int ret;
struct rte_eth_conf conf;
/* Callback for request of configuring network interface up/down */
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up)
+kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
int ret = 0;
}
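These two callbacks are handed to librte_kni through struct rte_kni_ops, so their prototypes must match the widened types exactly; a leftover uint8_t handler would no longer compile against the updated header. A registration sketch (abridged; field names as in rte_kni.h):

struct rte_kni_ops ops = {
	.port_id = port_id,	/* widened alongside the callbacks */
	.change_mtu = kni_change_mtu,
	.config_network_if = kni_config_network_interface,
};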
static int
-kni_alloc(uint8_t port_id)
+kni_alloc(uint16_t port_id)
{
uint8_t i;
struct rte_kni *kni;
} else
snprintf(conf.name, RTE_KNI_NAMESIZE,
"vEth%u", port_id);
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
/*
* The first KNI device associated to a port
}
static int
-kni_free_kni(uint8_t port_id)
+kni_free_kni(uint16_t port_id)
{
uint8_t i;
struct kni_port_params **p = kni_port_params_array;
main(int argc, char** argv)
{
int ret;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
unsigned i;
/* Associate signal_handler function with USR signals */
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
unsigned nb_ports_in_mask = 0;
int ret;
char name[RTE_JOBSTATS_NAMESIZE];
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
uint8_t i;
/* init EAL */
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done:\n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
__attribute__((unused)) void *ptr_data)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
struct sigaction signal_handler;
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
printf("Lcore %u: RX port %u\n",
- rx_lcore_id, (unsigned) portid);
+ rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n",
- (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: "
"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+			"Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done: \n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
static int numa_on = 1; /**< NUMA is enabled by default. */
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
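
These two structs are representative of why the patch exists: since the ethdev port id became 16-bit, any uint8_t field or local holding one truncates ids above 255 without any diagnostic. A contrived illustration of the failure mode, in pure ISO C:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t real_port = 260;        /* legal once ids are 16-bit */
		uint8_t stale_field = real_port; /* old field: keeps 260 % 256 */

		printf("stored %u, expected %u\n", stale_field, real_port);
		/* prints "stored 4, expected 260" -- traffic would silently
		 * be attributed to (or sent on) the wrong port */
		return 0;
	}
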
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
#define IPV6_L3FWD_NUM_ROUTES \
(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
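
Widening ipv4_l3fwd_out_if/ipv6_l3fwd_out_if doubles each table from one to two bytes per hash entry; the cost is accepted so a lookup result can name any valid port. A compile-time guard such as the following (hypothetical, not part of the patch; _Static_assert is C11) would keep the element width tied to the port id type:

	#include <stdint.h>

	typedef uint16_t portid_t;         /* app-local alias, as in testpmd */

	static portid_t ipv4_out_if[4096]; /* entry count is illustrative */

	_Static_assert(sizeof(ipv4_out_if[0]) == sizeof(portid_t),
		"out_if entries must be exactly as wide as a port id");
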
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
- unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
+ unsigned int lcore_id, uint16_t port_id, uint16_t queue_id);
/* exit signal handler */
static void
key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
next_hop : portid);
}
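
Note that the LPM variants keep next_hop as uint32_t, since that is the out-parameter type of rte_lpm_lookup(), and simply let the value narrow to the uint16_t return type; the narrowing is safe because next hops here are port ids bounded by RTE_MAX_ETHPORTS. The same pattern with the cast spelled out, as a sketch (the function name is illustrative; assumes <rte_lpm.h> and <rte_byteorder.h>):

	static inline uint16_t
	lpm_dst_port(struct rte_lpm *lpm, uint32_t dst_ip_be, uint16_t portid)
	{
		uint32_t next_hop; /* rte_lpm_lookup() writes a 32-bit next hop */

		if (rte_lpm_lookup(lpm, rte_be_to_cpu_32(dst_ip_be),
				&next_hop) == 0)
			return (uint16_t)next_hop; /* bounded by RTE_MAX_ETHPORTS */
		return portid; /* fall back to the input port on a miss */
	}
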
}
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
}
static int
-add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+add_cb_parse_ptype(uint16_t portid, uint16_t queueid)
{
printf("Port %d: softly parse packet type info\n", portid);
if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *d_addr_bytes;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
static inline enum freq_scale_hint_t
power_freq_scaleup_heuristic(unsigned lcore_id,
- uint8_t port_id,
+ uint16_t port_id,
uint16_t queue_id)
{
/**
{
struct rte_epoll_event event[num];
int n, i;
- uint8_t port_id, queue_id;
+ uint16_t port_id;
+ uint8_t queue_id;
void *data;
RTE_LOG(INFO, L3FWD_POWER,
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t port_id, queue_id;
+ uint8_t queue_id;
+ uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
rx_queue = &(qconf->rx_queue_list[i]);
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
uint32_t data;
int ret;
int i;
uint64_t prev_tsc, diff_tsc, cur_tsc;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
"rxqueueid=%hhu\n", lcore_id, portid, queueid);
}
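
The recurring %hhu-to-%u churn in the log strings is forced by the type change: %hhu tells printf to reinterpret the (promoted) argument as unsigned char, which would re-truncate a 16-bit id. Because of default argument promotions, plain %u is correct for uint8_t and uint16_t alike; PRIu16 is the explicit alternative. A short demonstration:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t portid = 300;

		printf("portid=%u\n", portid);          /* OK: promoted to int */
		printf("portid=%" PRIu16 "\n", portid); /* equivalent, explicit */
		printf("portid=%hhu\n", portid);        /* wrong: prints 44 */
		return 0;
	}
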
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
+ uint16_t portid;
uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
rte_eth_dev_socket_id(portid));
if (qconf->tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
}
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
static rte_spinlock_t spinlock_conf[RTE_MAX_ETHPORTS] = {RTE_SPINLOCK_INITIALIZER};
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
(unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint16_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
struct tcp_hdr *tcp;
/* Find destination port */
ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint16_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
- next_hop : portid);
+ return ((rte_lpm_lookup(l3fwd_lookup_struct,
+ rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
+ next_hop : portid);
}
#endif
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *tmp;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
- portid, queueid);
+ RTE_LOG(INFO, L3FWD, " --lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
}
while (1) {
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
static void
signal_handler(int signum)
{
- uint8_t portid;
- uint8_t nb_ports = rte_eth_dev_count();
+ uint16_t portid;
+ uint16_t nb_ports = rte_eth_dev_count();
/* When we receive a SIGINT signal */
if (signum == SIGINT) {
nb_lcore_params);
return -1;
}
- lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
+ lcore_params_array[nb_lcore_params].port_id = int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t nb_lcores;
uint16_t n_tx_queue;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
signal(SIGINT, signal_handler);
/* init EAL */
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct lcore_conf *qconf,
- struct rte_mbuf *m, uint8_t port)
+ struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
lpm_check_ptype(int portid);
uint16_t
-em_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+em_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
uint16_t
-lpm_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+lpm_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
int
};
static __rte_always_inline void
-send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[],
+send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
uint32_t num)
{
uint32_t len, j, n;
}
uint16_t
-em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
#define __L3FWD_EM_H__
static __rte_always_inline void
-l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
*/
static inline void
l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
static __rte_always_inline void
em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv4_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
static __rte_always_inline void
em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv6_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
- uint8_t next_hop;
+ uint16_t next_hop;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i, j, pos;
uint16_t dst_port[MAX_PKT_BURST];
struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static inline uint16_t
-lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm *ipv4_l3fwd_lookup_struct =
}
static inline uint16_t
-lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
*/
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint32_t dst_ipv4, uint8_t portid)
+ uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
}
uint16_t
-lpm_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
#define __L3FWD_LPM_H__
static __rte_always_inline void
-l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
static inline void
l3fwd_lpm_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
processx4_step2(const struct lcore_conf *qconf,
int32x4_t dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i = 0, j = 0;
uint16_t dst_port[MAX_PKT_BURST];
processx4_step2(const struct lcore_conf *qconf,
__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
uint16_t dst_port[MAX_PKT_BURST];
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
static int
check_port_config(const unsigned nb_ports)
{
- unsigned portid;
+ uint16_t portid;
uint16_t i;
for (i = 0; i < nb_lcore_params; ++i) {
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps -%s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
}
static int
-prepare_ptype_parser(uint8_t portid, uint16_t queueid)
+prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
if (parse_ptype) {
printf("Port %d: softly parse packet type info\n", portid);
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
ret = 0;
/* launch per-lcore init on every lcore */
{
struct rte_eth_link link;
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait((uint8_t)portid, &link);
+ rte_eth_link_get_nowait(portid, &link);
printf("\nStatistics for port %u ------------------------------"
"\nLink status: %25s"
"\nLink speed: %26u"
* int.
*/
static int
-lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
struct rte_eth_link link;
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) {
return -9;
}
- lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port;
+ lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = port;
lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue;
lp->io.rx.n_nic_queues ++;
if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
return -9;
}
- lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
+ lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = port;
lp->io.tx.n_nic_ports ++;
n_tuples ++;
static int
app_check_every_rx_port_is_tx_enabled(void)
{
- uint8_t port;
+ uint16_t port;
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) {
}
int
-app_get_nic_rx_queues_per_port(uint8_t port)
+app_get_nic_rx_queues_per_port(uint16_t port)
{
uint32_t i, count;
}
int
-app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
+app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue, uint32_t *lcore_out)
{
uint32_t lcore;
}
int
-app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out)
+app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out)
{
uint32_t lcore;
/* Print NIC RX configuration */
printf("NIC RX ports: ");
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
- uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);
+ uint32_t n_rx_queues = app_get_nic_rx_queues_per_port(port);
if (n_rx_queues == 0) {
continue;
continue;
}
- if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
+ if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
port);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
uint32_t n_rx_queues, n_tx_queues;
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %uMbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
{
unsigned socket;
uint32_t lcore;
- uint8_t port, queue;
+ uint16_t port;
+ uint8_t queue;
int ret;
uint32_t n_rx_queues, n_tx_queues;
}
/* Init port */
- printf("Initializing NIC port %u ...\n", (unsigned) port);
+ printf("Initializing NIC port %u ...\n", port);
ret = rte_eth_dev_configure(
port,
(uint8_t) n_rx_queues,
(uint8_t) n_tx_queues,
&port_conf);
if (ret < 0) {
- rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
+ rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
}
rte_eth_promiscuous_enable(port);
port, &nic_rx_ring_size, &nic_tx_ring_size);
if (ret < 0) {
rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
- (unsigned) port, ret);
+ port, ret);
}
app.nic_rx_ring_size = nic_rx_ring_size;
app.nic_tx_ring_size = nic_tx_ring_size;
pool = app.lcore_params[lcore].pool;
printf("Initializing NIC port %u RX queue %u ...\n",
- (unsigned) port,
- (unsigned) queue);
+ port, queue);
ret = rte_eth_rx_queue_setup(
port,
queue,
pool);
if (ret < 0) {
rte_panic("Cannot init RX queue %u for port %u (%d)\n",
- (unsigned) queue,
- (unsigned) port,
- ret);
+ queue, port, ret);
}
}
app_get_lcore_for_nic_tx(port, &lcore);
socket = rte_lcore_to_socket_id(lcore);
printf("Initializing NIC port %u TX queue 0 ...\n",
- (unsigned) port);
+ port);
ret = rte_eth_tx_queue_setup(
port,
0,
struct {
/* NIC */
struct {
- uint8_t port;
+ uint16_t port;
uint8_t queue;
} nic_queues[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
uint32_t n_nic_queues;
struct rte_ring *rings[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
/* NIC */
- uint8_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+ uint16_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
uint32_t n_nic_ports;
/* Internal buffers */
void app_init(void);
int app_lcore_main_loop(void *arg);
-int app_get_nic_rx_queues_per_port(uint8_t port);
-int app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out);
-int app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out);
+int app_get_nic_rx_queues_per_port(uint16_t port);
+int app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue,
+ uint32_t *lcore_out);
+int app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out);
int app_is_socket_used(uint32_t socket);
uint32_t app_get_lcores_io_rx(void);
uint32_t app_get_lcores_worker(void);
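
These prototypes sit in the load balancer's shared header, so the widened signatures must land together with their definitions: the C linker performs no type checking across translation units, and a stale uint8_t definition in a .c file that forgot to include the header would link cleanly and misbehave. A reduced illustration (file names hypothetical):

	/* main.h */
	int app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out);

	/* init.c: including main.h turns any signature drift into a hard
	 * compile error instead of a silent cross-TU mismatch */
	#include "main.h"

	int
	app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out)
	{
		(void)port;
		*lcore_out = 0; /* placeholder body for illustration */
		return 0;
	}
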
uint32_t i;
for (i = 0; i < lp->rx.n_nic_queues; i ++) {
- uint8_t port = lp->rx.nic_queues[i].port;
+ uint16_t port = lp->rx.nic_queues[i].port;
uint8_t queue = lp->rx.nic_queues[i].queue;
uint32_t n_mbufs, j;
printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
(double) stats.imissed / (double) (stats.imissed + stats.ipackets),
((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
lp->rx.nic_queues_iters[i] = 0;
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i ++) {
- uint8_t port = lp->tx.nic_ports[i];
+ uint16_t port = lp->tx.nic_ports[i];
struct rte_ring *ring = lp->tx.rings[port][worker];
uint32_t n_mbufs, n_pkts;
int ret;
printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
lp->tx.nic_ports_iters[port] = 0;
lp->tx.nic_ports_count[port] = 0;
static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
- uint8_t port;
+ uint16_t port;
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i++) {
if (lp->rings_out_iters[port] == APP_STATS){
printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
(unsigned) lp->worker_id,
- (unsigned) port,
+ port,
((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
lp->rings_out_iters[port] = 0;
lp->rings_out_count[port] = 0;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
-static uint8_t output_ports[RTE_MAX_ETHPORTS];
+static uint16_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packet that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
- uint8_t port_id = (uintptr_t)userdata;
+ uint16_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
}
static void
-configure_tx_buffer(uint8_t port_id, uint16_t size)
+configure_tx_buffer(uint16_t port_id, uint16_t size)
{
int ret;
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
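
configure_tx_buffer() relies on the common trick of smuggling the port id through the error callback's void *userdata by round-tripping it through (u)intptr_t; a 16-bit id still fits in a pointer with room to spare. A condensed, self-contained sketch of the pattern (setup_tx_buffer, on_tx_error and the tx_drop counters are illustrative names, not code from the patch):

	#include <stdint.h>
	#include <rte_ethdev.h>
	#include <rte_mbuf.h>
	#include <rte_malloc.h>

	static uint64_t tx_drop[RTE_MAX_ETHPORTS];

	static void
	on_tx_error(struct rte_mbuf **unsent, uint16_t count, void *userdata)
	{
		uint16_t port_id = (uintptr_t)userdata; /* unpack the port id */
		uint16_t i;

		tx_drop[port_id] += count;
		for (i = 0; i < count; i++)   /* unsent mbufs must be freed */
			rte_pktmbuf_free(unsent[i]);
	}

	static int
	setup_tx_buffer(uint16_t port_id, uint16_t size)
	{
		struct rte_eth_dev_tx_buffer *buf;

		buf = rte_zmalloc_socket("tx_buffer",
				RTE_ETH_TX_BUFFER_SIZE(size), 0,
				rte_eth_dev_socket_id(port_id));
		if (buf == NULL)
			return -1;
		rte_eth_tx_buffer_init(buf, size);
		/* pack the port id into the userdata pointer */
		return rte_eth_tx_buffer_set_err_callback(buf, on_tx_error,
				(void *)(uintptr_t)port_id);
	}
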
/*
rte_exit(EXIT_FAILURE, "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n",
(unsigned)RTE_MAX_ETHPORTS);
for (i = 0; i < ports->num_ports - 1; i+=2){
- uint8_t p1 = ports->id[i];
- uint8_t p2 = ports->id[i+1];
+ uint16_t p1 = ports->id[i];
+ uint16_t p2 = ports->id[i+1];
output_ports[p1] = p2;
output_ports[p2] = p1;
{
char *end = NULL;
unsigned long pm;
- uint8_t count = 0;
+ uint16_t count = 0;
if (portmask == NULL || *portmask == '\0')
return -1;
* on error.
*/
int
-parse_app_args(uint8_t max_ports, int argc, char *argv[])
+parse_app_args(uint16_t max_ports, int argc, char *argv[])
{
int option_index, opt;
char **argvopt = argv;
#ifndef _ARGS_H_
#define _ARGS_H_
-int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
+int parse_app_args(uint16_t max_ports, int argc, char *argv[]);
#endif /* ifndef _ARGS_H_ */
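
parse_app_args() now takes max_ports as uint16_t, and the portmask parser whose locals are widened above reduces to strtoul plus a set-bit count. A self-contained version under the same conventions (the mask_out parameter is an addition for this sketch):

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Returns the number of ports in the mask, or -1 on a bad string. */
	static int
	parse_portmask(const char *portmask, unsigned long *mask_out)
	{
		char *end = NULL;
		unsigned long pm;
		uint16_t count = 0;

		if (portmask == NULL || *portmask == '\0')
			return -1;
		errno = 0;
		pm = strtoul(portmask, &end, 16);
		if (errno != 0 || end == NULL || *end != '\0')
			return -1;
		*mask_out = pm;
		for (; pm != 0; pm &= pm - 1) /* Kernighan's set-bit count */
			count++;
		return count;
	}
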
* - start the port and report its status to stdout
*/
static int
-init_port(uint8_t port_num)
+init_port(uint16_t port_num)
{
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
uint16_t q;
int retval;
- printf("Port %u init ... ", (unsigned)port_num);
+ printf("Port %u init ... ", port_num);
fflush(stdout);
/* Standard DPDK port initialisation - config port, then set up
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
{
int retval;
const struct rte_memzone *mz;
- uint8_t i, total_ports;
+ uint16_t i, total_ports;
/* init EAL, parsing EAL args */
retval = rte_eal_init(argc, argv);
static struct client_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
} __rte_cache_aligned;
struct port_info {
- uint8_t num_ports;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t num_ports;
+ uint16_t id[RTE_MAX_ETHPORTS];
volatile struct rx_stats rx_stats;
volatile struct tx_stats tx_stats[MAX_CLIENTS];
};
/* ring[1] for slave send ack, master read */
struct rte_ring *ring[2];
int port_num; /* Total port numbers */
- uint8_t port[RTE_MAX_ETHPORTS]; /* Port id for that lcore to receive packets */
+ /* Port id for that lcore to receive packets */
+ uint16_t port[RTE_MAX_ETHPORTS];
}__attribute__((packed)) __rte_cache_aligned;
static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up- speed %u Mbps- %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned rx_lcore_id;
unsigned nb_ports_in_mask = 0;
unsigned i;
static int32_t
-ifname_to_portid(const char *ifname, uint8_t *port)
+ifname_to_portid(const char *ifname, uint16_t *port)
{
char *endptr;
uint64_t portid;
portid >= RTE_DIM(ports) || errno != 0)
return -EINVAL;
- *port = (uint8_t)portid;
+ *port = portid;
return 0;
}
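
For the netmap compat layer the parse runs the other direction: a numeric interface-name string becomes a port id, and the only substantive change is dropping the (uint8_t) narrowing cast. A filled-in standalone version consistent with the visible fragments (MAX_PORTS stands in for RTE_DIM(ports); the base-10 parse is an assumption):

	#include <errno.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define MAX_PORTS 64 /* stand-in for RTE_DIM(ports) */

	static int32_t
	ifname_to_portid(const char *ifname, uint16_t *port)
	{
		char *endptr;
		uint64_t portid;

		errno = 0;
		portid = strtoul(ifname, &endptr, 10);
		if (endptr == ifname || *endptr != '\0' ||
				portid >= MAX_PORTS || errno != 0)
			return -EINVAL;
		*port = (uint16_t)portid; /* range already validated above */
		return 0;
	}
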
}
static int
-check_nmreq(struct nmreq *req, uint8_t *port)
+check_nmreq(struct nmreq *req, uint16_t *port)
{
int32_t rc;
- uint8_t portid;
+ uint16_t portid;
if (req == NULL)
return -EINVAL;
}
if (ports[portid].pool == NULL) {
- RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
+ RTE_LOG(ERR, USER1, "Misconfigured portid %u\n", portid);
return -EINVAL;
}
static int
ioctl_niocginfo(__rte_unused int fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
struct nmreq *req;
int32_t rc;
}
static void
-netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
+netmap_ring_setup(struct netmap_ring *ring, uint16_t port, uint32_t ringid,
uint32_t num_slots)
{
uint32_t j;
}
static int
-netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
+netmap_regif(struct nmreq *req, uint32_t idx, uint16_t port)
{
struct netmap_if *nmif;
struct netmap_ring *ring;
int32_t rc;
if (ports[port].fd < RTE_DIM(fd_port)) {
- RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
+ RTE_LOG(ERR, USER1, "port %u already in use by fd: %u\n",
port, IDX_TO_FD(ports[port].fd));
return -EBUSY;
}
static int
ioctl_niocregif(int32_t fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
int32_t rc;
uint32_t idx;
struct nmreq *req;
{
fd_port[idx].port = FD_PORT_RSRV;
ports[port].fd = UINT32_MAX;
- rte_eth_dev_stop((uint8_t)port);
+ rte_eth_dev_stop(port);
}
/**
* packets as it can hold coming from its dpdk port.
*/
static inline int
-rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+rx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
uint16_t max_burst)
{
int32_t i, n_rx;
for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
r = NETMAP_RXRING(nifp, i);
- rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
+ rx_sync_ring(r, port, (uint16_t)i, burst);
rc += r->avail;
}
* buffers into rte_mbufs and sending them out on the rings's dpdk port.
*/
static int
-tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+tx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
struct rte_mempool *pool, uint16_t max_burst)
{
uint32_t i, n_tx;
for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
r = NETMAP_TXRING(nifp, i);
- tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
+ tx_sync_ring(r, port, (uint16_t)i, mp, burst);
rc += r->avail;
}
int
-rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
+rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
{
int32_t ret;
uint16_t i;
portid >= RTE_DIM(ports) ||
conf->nr_tx_rings > netmap.conf.max_rings ||
conf->nr_rx_rings > netmap.conf.max_rings) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
- rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
- tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
+ rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
+ tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
if (tx_slots > netmap.conf.max_slots ||
rx_slots > netmap.conf.max_slots) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
conf->nr_tx_rings, conf->eth_conf);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
- return ret;
+ RTE_LOG(ERR, USER1, "Couldn't configure port %u\n", portid);
+ return ret;
}
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't ot adjust number of descriptors for port %hhu\n",
+ "Couldn't ot adjust number of descriptors for port %u\n",
portid);
return ret;
}
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure TX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure TX queue %u of port %u\n",
i, portid);
return ret;
}
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure RX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure RX queue %u of port %u\n",
i, portid);
return ret;
}
};
int rte_netmap_init(const struct rte_netmap_conf *conf);
-int rte_netmap_init_port(uint8_t portid,
+int rte_netmap_init_port(uint16_t portid,
const struct rte_netmap_port_conf *conf);
int rte_netmap_close(int fd);
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
return 0;
}
static inline int
-configure_eth_port(uint8_t port_id)
+configure_eth_port(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 1, txRings = 1;
rte_eth_macaddr_get(port_id, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port_id,
+ port_id,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
uint32_t seqn = 0;
uint16_t i, ret = 0;
uint16_t nb_rx_pkts;
- uint8_t port_id;
+ uint16_t port_id;
struct rte_mbuf *pkts[MAX_PKTS_BURST];
RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
int ret;
unsigned nb_ports;
unsigned int lcore_id, last_lcore_id, master_lcore_id;
- uint8_t port_id;
- uint8_t nb_ports_available;
+ uint16_t port_id;
+ uint16_t nb_ports_available;
struct worker_thread_args worker_args = {NULL, NULL};
struct send_thread_args send_args = {NULL, NULL};
struct rte_ring *rx_to_workers;
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) port_id);
+ printf("Initializing port %u... done\n", port_id);
if (configure_eth_port(port_id) != 0)
rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
}
static uint16_t
-cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
+cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
#define MAX_LCORE_PARAMS 1024
struct rx_thread_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
uint8_t thread_id;
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct thread_tx_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct thread_tx_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
struct thread_tx_conf *qconf;
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline void
-send_packetsx4(uint8_t port,
+send_packetsx4(uint16_t port,
struct rte_mbuf *m[], uint32_t num)
{
uint32_t len, j, n;
static __m128i mask0;
static __m128i mask1;
static __m128i mask2;
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
int ret = 0;
key.xmm = _mm_and_si128(data, mask0);
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
int ret = 0;
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
+ return ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
}
#endif
-static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
__attribute__((unused));
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
#define EXCLUDE_8TH_PKT 0x7f
static inline void
-simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
struct ether_hdr *eth_hdr[8];
struct ipv4_hdr *ipv4_hdr[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
int32_t ret[8];
union ipv4_5tuple_host key[8];
__m128i data[8];
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
&key_array[0], 8, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
}
static inline void
-simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
int32_t ret[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
struct ether_hdr *eth_hdr[8];
union ipv6_5tuple_host key[8];
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
&key_array[0], 4, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
ether_addr_copy(&ports_eth_addr[dst_port[6]], ð_hdr[6]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[7]], ð_hdr[7]->s_addr);
- send_single_packet(m[0], (uint8_t)dst_port[0]);
- send_single_packet(m[1], (uint8_t)dst_port[1]);
- send_single_packet(m[2], (uint8_t)dst_port[2]);
- send_single_packet(m[3], (uint8_t)dst_port[3]);
- send_single_packet(m[4], (uint8_t)dst_port[4]);
- send_single_packet(m[5], (uint8_t)dst_port[5]);
- send_single_packet(m[6], (uint8_t)dst_port[6]);
- send_single_packet(m[7], (uint8_t)dst_port[7]);
+ send_single_packet(m[0], dst_port[0]);
+ send_single_packet(m[1], dst_port[1]);
+ send_single_packet(m[2], dst_port[2]);
+ send_single_packet(m[3], dst_port[3]);
+ send_single_packet(m[4], dst_port[4]);
+ send_single_packet(m[5], dst_port[5]);
+ send_single_packet(m[6], dst_port[6]);
+ send_single_packet(m[7], dst_port[7]);
}
#endif /* APP_LOOKUP_METHOD */
static __rte_always_inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline uint16_t
-get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
+get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
}
static inline void
-process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint8_t portid)
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
static inline void
processx4_step2(__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
static void
process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
- uint8_t portid) {
+ uint16_t portid)
+{
int j;
struct lthread *lt;
unsigned lcore_id;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
tx_conf = (struct thread_tx_conf *)args;
int ret;
uint16_t nb_rx;
int i;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
int worker_id;
int len[RTE_MAX_LCORE] = { 0 };
int old_len, new_len;
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
rte_lcore_id(), portid, queueid);
}
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint64_t prev_tsc, diff_tsc, cur_tsc;
int nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
uint32_t n;
uint32_t nb_rx;
unsigned lcore_id;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct thread_rx_conf *rx_conf;
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
return -1;
}
rx_thread_params_array[nb_rx_thread_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ int_fld[FLD_PORT];
rx_thread_params_array[nb_rx_thread_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
rx_thread_params_array[nb_rx_thread_params].lcore_id =
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
int ret;
int i;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
struct clock_id master_clock_id;
struct timeval new_adj;
int64_t delta;
- uint8_t portid;
+ uint16_t portid;
uint16_t seqID_SYNC;
uint16_t seqID_FOLLOWUP;
uint8_t ptpset;
uint8_t kernel_time_set;
- uint8_t current_ptp_port;
+ uint16_t current_ptp_port;
};
static struct ptpv2_data_slave_ordinary ptp_data;
pconf = &qos_conf[nb_pfc];
- pconf->rx_port = (uint8_t)vals[0];
- pconf->tx_port = (uint8_t)vals[1];
+ pconf->rx_port = vals[0];
+ pconf->tx_port = vals[1];
pconf->rx_core = (uint8_t)vals[2];
pconf->wt_core = (uint8_t)vals[3];
if (ret == 5)
}
if (pconf->rx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu16" index\n",
nb_pfc, pconf->rx_port);
return -1;
}
if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu16" index\n",
nb_pfc, pconf->tx_port);
return -1;
}
mask = 1lu << pconf->rx_port;
if (app_used_rx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu16" is used already\n",
nb_pfc, pconf->rx_port);
return -1;
}
mask = 1lu << pconf->tx_port;
if (app_used_tx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: port %"PRIu16" is used already\n",
nb_pfc, pconf->tx_port);
return -1;
}
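
The qos_sched validation above still records used ports in a single 64-bit mask (1lu << pconf->rx_port), which is only sound while port ids stay below 64; the earlier bound checks against RTE_MAX_ETHPORTS keep that true with the default configuration. If ids ever exceed 63, a word-array bitmap is the robust replacement; a sketch of that alternative (not what the patch does):

	#include <stdint.h>
	#include <rte_ethdev.h> /* RTE_MAX_ETHPORTS */

	#define BITS_PER_WORD 64

	static uint64_t used_rx_ports[(RTE_MAX_ETHPORTS + BITS_PER_WORD - 1) /
			BITS_PER_WORD];

	static int
	mark_rx_port_used(uint16_t port)
	{
		uint64_t bit = 1ULL << (port % BITS_PER_WORD);
		uint64_t *word = &used_rx_ports[port / BITS_PER_WORD];

		if (*word & bit)
			return -1; /* port already claimed by another flow */
		*word |= bit;
		return 0;
	}
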
};
static int
-app_init_port(uint8_t portid, struct rte_mempool *mp)
+app_init_port(uint16_t portid, struct rte_mempool *mp)
{
int ret;
struct rte_eth_link link;
tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
/* init port */
- RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
+ RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
rx_size = ring_conf.rx_size;
tx_size = ring_conf.tx_size;
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
+ ret, portid);
ring_conf.rx_size = rx_size;
ring_conf.tx_size = tx_size;
ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
rte_eth_dev_socket_id(portid), &rx_conf, mp);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+ ret, portid);
/* init one TX queue */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0,
(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
- "port=%"PRIu8" queue=%d\n", ret, portid, 0);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
+ ret, portid, 0);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_pmd_port_start: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_pmd_port_start: err=%d, port=%u\n",
+ ret, portid);
printf("done: ");
uint32_t pipe, subport;
int err;
- rte_eth_link_get((uint8_t)portid, &link);
+ rte_eth_link_get(portid, &link);
port_params.socket = socketid;
port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
/* initialize mbuf memory */
if (mode == APP_RX_MODE) {
for (i = 0; i < rx_idx; i++) {
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "reading port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u reading port%u\n",
i, lcore_id, rx_confs[i]->rx_port);
}
if (wt_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u sched+write "
- "port %"PRIu8"\n",
+ RTE_LOG(INFO, APP,
+ "flow %u lcoreid %u sched+write port %u\n",
i, lcore_id, wt_confs[i]->tx_port);
}
if (tx_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "writing port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u write port%u\n",
i, lcore_id, tx_confs[i]->tx_port);
}
struct flow_conf *flow = &qos_conf[i];
rte_eth_stats_get(flow->rx_port, &stats);
- printf("\nRX port %"PRIu8": rx: %"PRIu64 " err: %"PRIu64
+ printf("\nRX port %"PRIu16": rx: %"PRIu64 " err: %"PRIu64
" no_mbuf: %"PRIu64 "\n",
flow->rx_port,
stats.ipackets - rx_stats[i].ipackets,
memcpy(&rx_stats[i], &stats, sizeof(stats));
rte_eth_stats_get(flow->tx_port, &stats);
- printf("TX port %"PRIu8": tx: %" PRIu64 " err: %" PRIu64 "\n",
+ printf("TX port %"PRIu16": tx: %" PRIu64 " err: %" PRIu64 "\n",
flow->tx_port,
stats.opackets - tx_stats[i].opackets,
stats.oerrors - tx_stats[i].oerrors);
uint32_t n_mbufs;
struct rte_mbuf **m_table;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
uint32_t rx_core;
uint32_t wt_core;
uint32_t tx_core;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
void app_mixed_thread(struct thread_conf **qconf);
void app_stat(void);
-int subport_stat(uint8_t port_id, uint32_t subport_id);
-int pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q);
-int qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc);
-int qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc);
-int qavg_subport(uint8_t port_id, uint32_t subport_id);
+int subport_stat(uint16_t port_id, uint32_t subport_id);
+int pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc, uint8_t q);
+int qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc);
+int qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc);
+int qavg_subport(uint16_t port_id, uint32_t subport_id);
#ifdef __cplusplus
}
#include "main.h"
int
-qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q)
+qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
+ uint8_t q)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
}
int
-qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
+qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
}
int
-qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
}
int
-qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
+qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
}
int
-qavg_subport(uint8_t port_id, uint32_t subport_id)
+qavg_subport(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
}
int
-subport_stat(uint8_t port_id, uint32_t subport_id)
+subport_stat(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_subport_stats stats;
struct rte_sched_port *port;
}
int
-pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
void
pair_ports(void)
{
- uint8_t i, j;
+ uint16_t i, j;
/* Pair ports with their "closest neighbour" in the portmask */
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
if (is_bit_set(i, portmask))
- for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
+ for (j = i + 1; j < RTE_MAX_ETHPORTS; j++)
if (is_bit_set(j, portmask)) {
port_pairs[i] = j;
port_pairs[j] = i;
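
Dropping the explicit (uint8_t) cast on j is safe because of C's usual arithmetic conversions; a two-line sketch with illustrative values:

uint16_t i = 3, j;	/* illustrative values */

/* i + 1 is computed as int; assigning back to uint16_t is lossless for
 * every i < RTE_MAX_ETHPORTS, so the old narrowing cast can go. */
j = i + 1;
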
{
int i, ret;
- uint8_t port_id;
+ uint16_t port_id;
uint16_t nb_rx_pkts;
unsigned int lcore_id;
int i, ret;
int nb_dq_pkts;
- uint8_t port_id;
+ uint16_t port_id;
unsigned int lcore_id, previous_lcore_id;
unsigned int free;
{
uint16_t nb_dq_pkts;
- uint8_t port_id;
- uint8_t dest_port_id;
+ uint16_t port_id;
+ uint16_t dest_port_id;
unsigned int lcore_id, previous_lcore_id;
int ret;
unsigned int lcore_id, master_lcore_id, last_lcore_id;
- uint8_t port_id;
+ uint16_t port_id;
rte_log_set_global_level(RTE_LOG_INFO);
static uint16_t
-add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
}
static uint16_t
-calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
uint64_t cycles = 0;
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
int ret = rte_eal_init(argc, argv);
static struct node_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
struct shared_info {
uint8_t num_nodes;
- uint8_t num_ports;
+ uint16_t num_ports;
uint32_t num_flows;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t id[RTE_MAX_ETHPORTS];
struct rx_stats rx_stats;
struct tx_stats tx_stats[MAX_NODES];
struct filter_stats filter_stats[MAX_NODES];
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
};
static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port)
+port_init(uint16_t port)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf;
RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
vmdq_ports_eth_addr[port].addr_bytes[0],
vmdq_ports_eth_addr[port].addr_bytes[1],
vmdq_ports_eth_addr[port].addr_bytes[2],
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (enabled_port_mask & (1 << i))
- ports[num_ports++] = (uint8_t)i;
+ ports[num_ports++] = i;
}
if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret, i;
- uint8_t portid;
+ uint16_t portid;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
uint64_t flags = 0;
EXPORT_MAP := rte_bitratestats_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_BITRATE) := rte_bitrate.c
int
rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
- uint8_t port_id)
+ uint16_t port_id)
{
struct rte_stats_bitrate *port_data;
struct rte_eth_stats eth_stats;
* - Negative value on error
*/
int rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
- uint8_t port_id);
+ uint16_t port_id);
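
For context, a hedged sketch of how this helper is typically driven; the rte_metrics_init() call and registration step reflect the library's usual flow and are assumptions, not part of this patch:

#include <rte_bitrate.h>
#include <rte_metrics.h>
#include <rte_lcore.h>

static void
bitrate_setup_and_sample(uint16_t port_id)
{
	struct rte_stats_bitrates *bd;

	rte_metrics_init(rte_socket_id());	/* backing metrics store */
	bd = rte_stats_bitrate_create();
	if (bd == NULL || rte_stats_bitrate_reg(bd) < 0)
		return;
	/* Call periodically; updates the registered metrics for the port. */
	rte_stats_bitrate_calc(bd, port_id);
}
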
#ifdef __cplusplus
}
EXPORT_MAP := rte_ethdev_version.map
-LIBABIVER := 7
+LIBABIVER := 8
SRCS-y += rte_ethdev.c
SRCS-y += rte_flow.c
STAT_QMAP_RX
};
-uint8_t
-rte_eth_find_next(uint8_t port_id)
+uint16_t
+rte_eth_find_next(uint16_t port_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
return NULL;
}
-static uint8_t
+static uint16_t
rte_eth_dev_find_free_port(void)
{
unsigned i;
}
static struct rte_eth_dev *
-eth_dev_get(uint8_t port_id)
+eth_dev_get(uint16_t port_id)
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
- uint8_t port_id;
+ uint16_t port_id;
struct rte_eth_dev *eth_dev;
port_id = rte_eth_dev_find_free_port();
struct rte_eth_dev *
rte_eth_dev_attach_secondary(const char *name)
{
- uint8_t i;
+ uint16_t i;
struct rte_eth_dev *eth_dev;
if (rte_eth_dev_data == NULL)
}
int
-rte_eth_dev_is_valid_port(uint8_t port_id)
+rte_eth_dev_is_valid_port(uint16_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
(rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
}
int
-rte_eth_dev_socket_id(uint8_t port_id)
+rte_eth_dev_socket_id(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
return rte_eth_devices[port_id].data->numa_node;
}
-uint8_t
+uint16_t
rte_eth_dev_count(void)
{
- uint8_t p;
- uint8_t count;
+ uint16_t p;
+ uint16_t count;
count = 0;
}
int
-rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
+rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
const char *tmp;
}
int
-rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
+rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
int ret;
int i;
}
static int
-rte_eth_dev_is_detachable(uint8_t port_id)
+rte_eth_dev_is_detachable(uint16_t port_id)
{
uint32_t dev_flags;
/* attach the new device, then store port_id of the device */
int
-rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
+rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
int ret = -1;
int current = rte_eth_dev_count();
/* detach the device, then store the name of the device */
int
-rte_eth_dev_detach(uint8_t port_id, char *name)
+rte_eth_dev_detach(uint16_t port_id, char *name)
{
int ret = -1;
}
int
-rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_dev *dev;
}
static void
-rte_eth_dev_config_restore(uint8_t port_id)
+rte_eth_dev_config_restore(uint16_t port_id)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
}
int
-rte_eth_dev_start(uint8_t port_id)
+rte_eth_dev_start(uint16_t port_id)
{
struct rte_eth_dev *dev;
int diag;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (dev->data->dev_started != 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
" already started\n",
port_id);
return 0;
}
void
-rte_eth_dev_stop(uint8_t port_id)
+rte_eth_dev_stop(uint16_t port_id)
{
struct rte_eth_dev *dev;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (dev->data->dev_started == 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
+ RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
" already stopped\n",
port_id);
return;
}
int
-rte_eth_dev_set_link_up(uint8_t port_id)
+rte_eth_dev_set_link_up(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_set_link_down(uint8_t port_id)
+rte_eth_dev_set_link_down(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_dev_close(uint8_t port_id)
+rte_eth_dev_close(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_reset(uint8_t port_id)
+rte_eth_dev_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
int ret;
}
int
-rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
}
int
-rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
}
int
-rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
+rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
}
void
-rte_eth_promiscuous_enable(uint8_t port_id)
+rte_eth_promiscuous_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_promiscuous_disable(uint8_t port_id)
+rte_eth_promiscuous_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_promiscuous_get(uint8_t port_id)
+rte_eth_promiscuous_get(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_allmulticast_enable(uint8_t port_id)
+rte_eth_allmulticast_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_allmulticast_disable(uint8_t port_id)
+rte_eth_allmulticast_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_allmulticast_get(uint8_t port_id)
+rte_eth_allmulticast_get(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
+rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
+rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
+rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_stats_reset(uint8_t port_id)
+rte_eth_stats_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
static int
-get_xstats_count(uint8_t port_id)
+get_xstats_count(uint16_t port_id)
{
struct rte_eth_dev *dev;
int count;
}
int
-rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
uint64_t *id)
{
int cnt_xstats, idx_xstat;
}
int
-rte_eth_xstats_get_names_by_id(uint8_t port_id,
+rte_eth_xstats_get_names_by_id(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names, unsigned int size,
uint64_t *ids)
{
}
int
-rte_eth_xstats_get_names(uint8_t port_id,
+rte_eth_xstats_get_names(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names,
unsigned int size)
{
/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
- unsigned int n)
+rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
{
/* If need all xstats */
if (!ids) {
}
int
-rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
+rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
unsigned int n)
{
struct rte_eth_stats eth_stats;
/* reset ethdev extended statistics */
void
-rte_eth_xstats_reset(uint8_t port_id)
+rte_eth_xstats_reset(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
static int
-set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
+set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
uint8_t is_rx)
{
struct rte_eth_dev *dev;
int
-rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
+rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
uint8_t stat_idx)
{
return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
int
-rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
+rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
uint8_t stat_idx)
{
return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
}
int
-rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
+rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
{
struct rte_eth_dev *dev;
}
void
-rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
+rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
const struct rte_eth_desc_lim lim = {
}
int
-rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num)
{
int i, j;
}
void
-rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
+rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
int
-rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
+rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
+rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
int ret;
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
+rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
{
struct rte_eth_dev *dev;
int ret;
}
int
-rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
+rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
+ int on)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
+rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
enum rte_vlan_type vlan_type,
uint16_t tpid)
{
}
int
-rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
+rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
{
struct rte_eth_dev *dev;
int ret = 0;
}
int
-rte_eth_dev_get_vlan_offload(uint8_t port_id)
+rte_eth_dev_get_vlan_offload(uint16_t port_id)
{
struct rte_eth_dev *dev;
int ret = 0;
}
int
-rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
+rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
+ struct rte_eth_pfc_conf *pfc_conf)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_rss_reta_update(uint8_t port_id,
+rte_eth_dev_rss_reta_update(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
}
int
-rte_eth_dev_rss_reta_query(uint8_t port_id,
+rte_eth_dev_rss_reta_query(uint16_t port_id,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
}
int
-rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+rte_eth_dev_rss_hash_update(uint16_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
struct rte_eth_udp_tunnel *udp_tunnel)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_led_on(uint8_t port_id)
+rte_eth_led_on(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_led_off(uint8_t port_id)
+rte_eth_led_off(uint16_t port_id)
{
struct rte_eth_dev *dev;
* an empty spot.
*/
static int
-get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
static const struct ether_addr null_mac_addr;
int
-rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
+rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
uint32_t pool)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
+rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
int index;
}
int
-rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
+rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
* an empty spot.
*/
static int
-get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
+get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
}
int
-rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
+rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
uint8_t on)
{
int index;
}
int
-rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
+rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
{
struct rte_eth_dev *dev;
return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
-int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
uint16_t tx_rate)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_mirror_rule_set(uint8_t port_id,
+rte_eth_mirror_rule_set(uint16_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
}
int
-rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
+rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_callback_register(uint8_t port_id,
+rte_eth_dev_callback_register(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
}
int
-rte_eth_dev_callback_unregister(uint8_t port_id,
+rte_eth_dev_callback_unregister(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
}
int
-rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
+rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
uint32_t vec;
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
int epfd, int op, void *data)
{
uint32_t vec;
}
int
-rte_eth_dev_rx_intr_enable(uint8_t port_id,
+rte_eth_dev_rx_intr_enable(uint16_t port_id,
uint16_t queue_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_rx_intr_disable(uint8_t port_id,
+rte_eth_dev_rx_intr_disable(uint16_t port_id,
uint16_t queue_id)
{
struct rte_eth_dev *dev;
int
-rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
+rte_eth_dev_filter_supported(uint16_t port_id,
+ enum rte_filter_type filter_type)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
{
struct rte_eth_dev *dev;
}
void *
-rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
}
void *
-rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
}
void *
-rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
}
int
-rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
}
int
-rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
}
int
-rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+rte_eth_dev_set_mc_addr_list(uint16_t port_id,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
}
int
-rte_eth_timesync_enable(uint8_t port_id)
+rte_eth_timesync_enable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_disable(uint8_t port_id)
+rte_eth_timesync_disable(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
+rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
+ struct timespec *timestamp,
uint32_t flags)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
+rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
+ struct timespec *timestamp)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
+rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
+rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
+rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
+rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_get_eeprom_length(uint8_t port_id)
+rte_eth_dev_get_eeprom_length(uint16_t port_id)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
+rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_get_dcb_info(uint8_t port_id,
+rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
+rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
struct rte_eth_dev *dev;
}
int
-rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
+rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
uint32_t mask,
uint8_t en)
}
int
-rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc)
{
* @return
* The number of packets returned to the user.
*/
-typedef uint16_t (*rte_rx_callback_fn)(uint8_t port, uint16_t queue,
+typedef uint16_t (*rte_rx_callback_fn)(uint16_t port, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
void *user_param);
* @return
* The number of packets to be written to the NIC.
*/
-typedef uint16_t (*rte_tx_callback_fn)(uint8_t port, uint16_t queue,
+typedef uint16_t (*rte_tx_callback_fn)(uint16_t port, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
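
Both callback typedefs now take a 16-bit port. A sketch of a conforming RX callback; the counter and names are illustrative, not from the patch:

#include <rte_common.h>
#include <rte_mbuf.h>

static uint64_t rx_seen;	/* illustrative counter */

static uint16_t
count_rx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);
	rx_seen += nb_pkts;
	return nb_pkts;
}

It would be registered per queue with rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, NULL).
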
/**
/** bitmap array of associating Ethernet MAC addresses to pools */
struct ether_addr* hash_mac_addrs;
/** Device Ethernet MAC addresses of hash filtering. */
- uint8_t port_id; /**< Device [external] port identifier. */
+ uint16_t port_id; /**< Device [external] port identifier. */
__extension__
uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */
* @return
* Next valid port id, RTE_MAX_ETHPORTS if there is none.
*/
-uint8_t rte_eth_find_next(uint8_t port_id);
+uint16_t rte_eth_find_next(uint16_t port_id);
/**
* Macro to iterate over all enabled ethdev ports.
* @return
* - The total number of usable Ethernet devices.
*/
-uint8_t rte_eth_dev_count(void);
+uint16_t rte_eth_dev_count(void);
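
With rte_eth_find_next() and rte_eth_dev_count() widened, a port-iteration loop looks as below; RTE_ETH_FOREACH_DEV is the iteration macro mentioned above:

#include <stdio.h>
#include <rte_ethdev.h>

static void
list_ports(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id)
		printf("port %u: socket %d\n", port_id,
				rte_eth_dev_socket_id(port_id));
}
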
/**
* @internal
* @return
* 0 on success and port_id is filled, negative on error
*/
-int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
+int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
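
Usage sketch for the widened out-parameter; the devargs string names an illustrative ring PMD vdev and is not something this patch prescribes:

static void
attach_ring_vdev(void)
{
	uint16_t port_id;

	if (rte_eth_dev_attach("net_ring0", &port_id) == 0)
		printf("device attached as port %u\n", port_id);
}
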
/**
* Detach a Ethernet device specified by port identifier.
* @return
* 0 on success and devname is filled, negative on error
*/
-int rte_eth_dev_detach(uint8_t port_id, char *devname);
+int rte_eth_dev_detach(uint16_t port_id, char *devname);
/**
* Convert a numerical speed in Mbps to a bitmap flag that can be used in
* - 0: Success, device configured.
* - <0: Error code returned by the driver configuration function.
*/
-int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
+int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
/**
* allocate network memory buffers from the memory pool when
* initializing receive descriptors.
*/
-int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
* - 0: Success, the transmit queue is correctly set up.
* - -ENOMEM: Unable to allocate the transmit ring descriptors.
*/
-int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
* a default of zero if the socket could not be determined.
* -1 is returned is the port_id value is out of range.
*/
-int rte_eth_dev_socket_id(uint8_t port_id);
+int rte_eth_dev_socket_id(uint16_t port_id);
/**
* Check if port_id of device is attached
* - 0 if port is out of range or not attached
* - 1 if device is attached
*/
-int rte_eth_dev_is_valid_port(uint8_t port_id);
+int rte_eth_dev_is_valid_port(uint16_t port_id);
/**
* Start specified RX queue of a port. It is used when rx_deferred_start
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function not supported in PMD driver.
*/
-int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
+int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
/**
* Stop specified RX queue of a port
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function not supported in PMD driver.
*/
-int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
+int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
/**
* Start TX for specified queue of a port. It is used when tx_deferred_start
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function not supported in PMD driver.
*/
-int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
+int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
/**
* Stop specified TX queue of a port
* - -EINVAL: The port_id or the queue_id out of range.
* - -ENOTSUP: The function not supported in PMD driver.
*/
-int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
+int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
* - 0: Success, Ethernet device started.
* - <0: Error code of the driver device start function.
*/
-int rte_eth_dev_start(uint8_t port_id);
+int rte_eth_dev_start(uint16_t port_id);
/**
* Stop an Ethernet device. The device can be restarted with a call to
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_dev_stop(uint8_t port_id);
+void rte_eth_dev_stop(uint16_t port_id);
/**
* - 0: Success, Ethernet device linked up.
* - <0: Error code of the driver device link up function.
*/
-int rte_eth_dev_set_link_up(uint8_t port_id);
+int rte_eth_dev_set_link_up(uint16_t port_id);
/**
* Link down an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-int rte_eth_dev_set_link_down(uint8_t port_id);
+int rte_eth_dev_set_link_down(uint16_t port_id);
/**
* Close a stopped Ethernet device. The device cannot be restarted!
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_dev_close(uint8_t port_id);
+void rte_eth_dev_close(uint16_t port_id);
/**
* Reset a Ethernet device and keep its port id.
* @param port_id
* The port identifier of the Ethernet device.
*/
-int rte_eth_dev_reset(uint8_t port_id);
+int rte_eth_dev_reset(uint16_t port_id);
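
The reset contract is stop, reset, then reconfigure from scratch; a hedged sketch of that sequence (queue re-setup is left to the caller):

#include <rte_ethdev.h>

static int
recover_port(uint16_t port_id, const struct rte_eth_conf *conf)
{
	rte_eth_dev_stop(port_id);
	if (rte_eth_dev_reset(port_id) != 0)
		return -1;
	/* Reconfigure as from scratch; the caller must then re-setup
	 * RX/TX queues before calling rte_eth_dev_start(). */
	return rte_eth_dev_configure(port_id, 1, 1, conf);
}
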
/**
* Enable receipt in promiscuous mode for an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_promiscuous_enable(uint8_t port_id);
+void rte_eth_promiscuous_enable(uint16_t port_id);
/**
* Disable receipt in promiscuous mode for an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_promiscuous_disable(uint8_t port_id);
+void rte_eth_promiscuous_disable(uint16_t port_id);
/**
* Return the value of promiscuous mode for an Ethernet device.
* - (0) if promiscuous is disabled.
* - (-1) on error
*/
-int rte_eth_promiscuous_get(uint8_t port_id);
+int rte_eth_promiscuous_get(uint16_t port_id);
/**
* Enable the receipt of any multicast frame by an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_allmulticast_enable(uint8_t port_id);
+void rte_eth_allmulticast_enable(uint16_t port_id);
/**
* Disable the receipt of all multicast frames by an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_allmulticast_disable(uint8_t port_id);
+void rte_eth_allmulticast_disable(uint16_t port_id);
/**
* Return the value of allmulticast mode for an Ethernet device.
* - (0) if allmulticast is disabled.
* - (-1) on error
*/
-int rte_eth_allmulticast_get(uint8_t port_id);
+int rte_eth_allmulticast_get(uint16_t port_id);
/**
* Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
* A pointer to an *rte_eth_link* structure to be filled with
* the status, the speed and the mode of the Ethernet device link.
*/
-void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
+void rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
/**
* Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
* A pointer to an *rte_eth_link* structure to be filled with
* the status, the speed and the mode of the Ethernet device link.
*/
-void rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *link);
+void rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
/**
* Retrieve the general I/O statistics of an Ethernet device.
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
+int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
/**
* Reset the general I/O statistics of an Ethernet device.
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_stats_reset(uint8_t port_id);
+void rte_eth_stats_reset(uint16_t port_id);
/**
* Retrieve names of extended statistics of an Ethernet device.
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get_names(uint8_t port_id,
+int rte_eth_xstats_get_names(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names,
unsigned int size);
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
+int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
unsigned int n);
/**
* - A negative value on error (invalid port id).
*/
int
-rte_eth_xstats_get_names_by_id(uint8_t port_id,
+rte_eth_xstats_get_names_by_id(uint16_t port_id,
struct rte_eth_xstat_name *xstats_names, unsigned int size,
uint64_t *ids);
* shall not be used by the caller.
* - A negative value on error (invalid port id).
*/
-int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
+int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
uint64_t *values, unsigned int n);
/**
* -ENODEV for invalid port_id,
* -EINVAL if the xstat_name doesn't exist in port_id
*/
-int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
uint64_t *id);
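
A sketch tying the two by-id calls together; "rx_good_packets" is one of the generic xstat names, used here purely for illustration:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_rx_good_packets(uint16_t port_id)
{
	uint64_t id, value;

	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
			&id) != 0)
		return;
	/* rte_eth_xstats_get_by_id() returns the number of values read. */
	if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
		printf("rx_good_packets: %" PRIu64 "\n", value);
}
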
/**
* @param port_id
* The port identifier of the Ethernet device.
*/
-void rte_eth_xstats_reset(uint8_t port_id);
+void rte_eth_xstats_reset(uint16_t port_id);
/**
* Set a mapping for the specified transmit queue to the specified per-queue
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id,
+int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
uint16_t tx_queue_id, uint8_t stat_idx);
/**
* @return
* Zero if successful. Non-zero otherwise.
*/
-int rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id,
+int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
uint16_t rx_queue_id,
uint8_t stat_idx);
* A pointer to a structure of type *ether_addr* to be filled with
* the Ethernet address of the Ethernet device.
*/
-void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
+void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr);
/**
* Retrieve the contextual information of an Ethernet device.
* A pointer to a structure of type *rte_eth_dev_info* to be filled with
* the contextual information of the Ethernet device.
*/
-void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
+void rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
/**
* Retrieve the firmware version of a device.
* - (>0) if *fw_size* is not enough to store firmware version, return
* the size of the non truncated string.
*/
-int rte_eth_dev_fw_version_get(uint8_t port_id,
+int rte_eth_dev_fw_version_get(uint16_t port_id,
char *fw_version, size_t fw_size);
/**
* count of supported ptypes will be returned.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
+int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
uint32_t *ptypes, int num);
/**
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu);
+int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
/**
* Change the MTU of an Ethernet device.
* - (-EINVAL) if *mtu* invalid.
* - (-EBUSY) if operation is not allowed when the port is running
*/
-int rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu);
+int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
/**
* Enable/Disable hardware filtering by an Ethernet device of received
* - (-ENOSYS) if VLAN filtering on *port_id* disabled.
* - (-EINVAL) if *vlan_id* > 4095.
*/
-int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
+int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
/**
* Enable/Disable hardware VLAN Strip by a rx queue of an Ethernet device.
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if *rx_queue_id* invalid.
*/
-int rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id,
+int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
int on);
/**
* - (-ENOSUP) if hardware-assisted VLAN TPID setup is not supported.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
+int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
enum rte_vlan_type vlan_type,
uint16_t tag_type);
* - (-ENOSUP) if hardware-assisted VLAN filtering not configured.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
+int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
/**
* Read VLAN Offload configuration from an Ethernet device
* ETH_VLAN_EXTEND_OFFLOAD
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_get_vlan_offload(uint8_t port_id);
+int rte_eth_dev_get_vlan_offload(uint16_t port_id);
/**
* Set port based TX VLAN insertion on or off.
* - (0) if successful.
* - negative if failed.
*/
-int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
+int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
/**
*
* *rx_pkts* array.
*/
static inline uint16_t
-rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
* (-ENOTSUP) if the device does not support this function
*/
static inline int
-rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
{
struct rte_eth_dev *dev;
* - (-ENOTSUP) if the device does not support this function
*/
static inline int
-rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
+rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
* - (-ENODEV) bad port or queue (only if compiled with debug).
*/
static inline int
-rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
+rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
uint16_t offset)
{
struct rte_eth_dev *dev;
* - (-ENOTSUP) if the device does not support this function.
* - (-ENODEV) bad port or queue (only if compiled with debug).
*/
-static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
+static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
uint16_t queue_id, uint16_t offset)
{
struct rte_eth_dev *dev;
* *tx_pkts* parameter when the transmit ring is full or has been filled up.
*/
static inline uint16_t
-rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
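
These two inline paths are the ones every application exercises. A minimal RX-to-TX forwarding loop over the widened ids, assuming two already-initialized ports:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
forward_loop(uint16_t rx_port, uint16_t tx_port)
{
	struct rte_mbuf *bufs[32];

	for (;;) {
		uint16_t nb_rx = rte_eth_rx_burst(rx_port, 0, bufs, 32);
		uint16_t nb_tx = rte_eth_tx_burst(tx_port, 0, bufs, nb_rx);

		/* Free whatever the TX ring could not absorb. */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(bufs[nb_tx++]);
	}
}
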
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
static inline uint16_t
-rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct rte_eth_dev *dev;
*/
static inline uint16_t
-rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
+rte_eth_tx_prepare(__rte_unused uint16_t port_id,
+ __rte_unused uint16_t queue_id,
__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
return nb_pkts;
* callback is called for any packets which could not be sent.
*/
static inline uint16_t
-rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer)
{
uint16_t sent;
* the rest.
*/
static __rte_always_inline uint16_t
-rte_eth_tx_buffer(uint8_t port_id, uint16_t queue_id,
+rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
{
buffer->pkts[buffer->length++] = tx_pkt;
* are in use.
*/
int
-rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);
+rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
/**
* The eth device event type for interrupt, and maybe others in the future.
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
-typedef int (*rte_eth_dev_cb_fn)(uint8_t port_id,
+typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
enum rte_eth_event_type event, void *cb_arg, void *ret_param);
/**< user application callback to be registered for interrupts */
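
A conforming event callback under the new typedef; the LSC event choice and names are illustrative:

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link state changed\n", port_id);
	return 0;
}

It would be registered with rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL).
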
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_callback_register(uint8_t port_id,
+int rte_eth_dev_callback_register(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_callback_unregister(uint8_t port_id,
+int rte_eth_dev_callback_unregister(uint16_t port_id,
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg);
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_rx_intr_enable(uint8_t port_id, uint16_t queue_id);
+int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
/**
* When lcore wakes up from rx interrupt indicating packet coming, disable rx
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_rx_intr_disable(uint8_t port_id, uint16_t queue_id);
+int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
/**
* RX Interrupt control per port.
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data);
+int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
/**
* RX Interrupt control per queue.
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
+int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
int epfd, int op, void *data);
/**
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_led_on(uint8_t port_id);
+int rte_eth_led_on(uint16_t port_id);
/**
* Turn off the LED on the Ethernet device.
* that operation.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_led_off(uint8_t port_id);
+int rte_eth_led_off(uint16_t port_id);
/**
* Get current status of the Ethernet link flow control for Ethernet device
* - (-ENOTSUP) if hardware doesn't support flow control.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_flow_ctrl_get(uint8_t port_id,
+int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
/**
* - (-EINVAL) if bad parameter
* - (-EIO) if flow control setup failure
*/
-int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
+int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
struct rte_eth_fc_conf *fc_conf);
/**
* - (-EINVAL) if bad parameter
* - (-EIO) if flow control setup failure
*/
-int rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id,
+int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
struct rte_eth_pfc_conf *pfc_conf);
/**
* - (-ENOSPC) if no more MAC addresses can be added.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+int rte_eth_dev_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
uint32_t pool);
/**
* - (-ENODEV) if *port* invalid.
* - (-EADDRINUSE) if attempting to remove the default MAC address
*/
-int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
+int rte_eth_dev_mac_addr_remove(uint16_t port, struct ether_addr *mac_addr);
/**
* Set the default MAC address.
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if MAC address is invalid.
*/
-int rte_eth_dev_default_mac_addr_set(uint8_t port, struct ether_addr *mac_addr);
-
+int rte_eth_dev_default_mac_addr_set(uint16_t port,
+ struct ether_addr *mac_addr);
/**
* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_reta_update(uint8_t port,
+int rte_eth_dev_rss_reta_update(uint16_t port,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_reta_query(uint8_t port,
+int rte_eth_dev_rss_reta_query(uint16_t port,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
- uint8_t on);
+int rte_eth_dev_uc_hash_table_set(uint16_t port, struct ether_addr *addr,
+ uint8_t on);
/**
* Updates all unicast hash bitmaps for receiving packet with any Unicast
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
+int rte_eth_dev_uc_all_hash_table_set(uint16_t port, uint8_t on);
/**
* Set a traffic mirroring rule on an Ethernet device
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if the mr_conf information is not correct.
*/
-int rte_eth_mirror_rule_set(uint8_t port_id,
+int rte_eth_mirror_rule_set(uint16_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
uint8_t on);
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_mirror_rule_reset(uint8_t port_id,
+int rte_eth_mirror_rule_reset(uint16_t port_id,
uint8_t rule_id);
/**
* - (-ENODEV) if *port_id* invalid.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
+int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
/**
* - (-ENOTSUP) if hardware doesn't support.
* - (-EINVAL) if bad parameter.
*/
-int rte_eth_dev_rss_hash_update(uint8_t port_id,
+int rte_eth_dev_rss_hash_update(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
/**
* - (-ENOTSUP) if hardware doesn't support RSS.
*/
int
-rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf);
/**
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
/**
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
+rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
struct rte_eth_udp_tunnel *tunnel_udp);
/**
* - (-ENOTSUP) if hardware doesn't support this filter type.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type);
+int rte_eth_dev_filter_supported(uint16_t port_id,
+ enum rte_filter_type filter_type);
/**
* Take operations to assigned filter type on an Ethernet device.
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
+int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg);
/**
* - (-ENODEV) if port identifier is invalid.
* - (-ENOTSUP) if hardware doesn't support.
*/
-int rte_eth_dev_get_dcb_info(uint8_t port_id,
+int rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info);
/**
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
+void *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param);
/**
* - -EINVAL: The port_id or the queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
-int rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
+int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb);
/**
* - -EINVAL: The port_id or the queue_id is out of range, or the callback
* is NULL or not found for the port/queue.
*/
-int rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
+int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxtx_callback *user_cb);
/**
* - -ENOTSUP: routine is not supported by the device PMD.
* - -EINVAL: The port_id or the queue_id is out of range.
*/
-int rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
/**
* - -ENOTSUP: routine is not supported by the device PMD.
* - -EINVAL: The port_id or the queue_id is out of range.
*/
-int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
+int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
/**
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info);
+int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
/**
* Retrieve size of device EEPROM
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_eeprom_length(uint8_t port_id);
+int rte_eth_dev_get_eeprom_length(uint16_t port_id);
/**
* Retrieve EEPROM and EEPROM attribute
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
+int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
/**
* Program EEPROM with provided data
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info);
+int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
/**
* Set the list of multicast addresses to filter on an Ethernet device.
* - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
* - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
*/
-int rte_eth_dev_set_mc_addr_list(uint8_t port_id,
+int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_enable(uint8_t port_id);
+int rte_eth_timesync_enable(uint16_t port_id);
/**
* Disable IEEE1588/802.1AS timestamping for an Ethernet device.
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_disable(uint8_t port_id);
+int rte_eth_timesync_disable(uint16_t port_id);
/**
* Read an IEEE1588/802.1AS RX timestamp from an Ethernet device.
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_read_rx_timestamp(uint8_t port_id,
+int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
struct timespec *timestamp, uint32_t flags);
/**
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_read_tx_timestamp(uint8_t port_id,
+int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
struct timespec *timestamp);
/**
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta);
+int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
/**
* Read the time from the timesync clock on an Ethernet device.
* @return
* - 0: Success.
*/
-int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
+int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
/**
* Set the time of the timesync clock on an Ethernet device.
* - -ENODEV: The port ID is invalid.
* - -ENOTSUP: The function is not supported by the Ethernet driver.
*/
-int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
+int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
/**
* Create memzone for HW rings.
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
+rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel);
/**
* - (-ENOTSUP) if hardware doesn't support tunnel type.
*/
int
-rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
+rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
uint32_t mask,
uint8_t en);
* - (-ENODEV or -EINVAL) on failure.
*/
int
-rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
+rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
/**
* Get the device name from port id
* - (-EINVAL) on failure.
*/
int
-rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
+rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
/**
* Check that numbers of Rx and Tx descriptors satisfy descriptors limits from
* - (0) if successful.
* - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
*/
-int rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
uint16_t *nb_rx_desc,
uint16_t *nb_tx_desc);
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
-rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
+rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_flow_ops *ops;
* additional details.
*/
const struct rte_flow_ops *
-rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
+rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error);
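A sketch of how a caller probes for rte_flow support via the updated accessor:

/* Sketch: NULL means the PMD does not implement rte_flow; the error
 * structure is filled in with the cause. */
struct rte_flow_error flow_err;
const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, &flow_err);

if (ops == NULL)
	printf("no rte_flow support: %s\n",
			flow_err.message ? flow_err.message : "unknown");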
#ifdef __cplusplus
}
/* Get generic traffic manager operations structure from a port. */
const struct rte_tm_ops *
-rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error)
+rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
const struct rte_tm_ops *ops;
/* Get number of leaf nodes */
int
-rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
uint32_t *n_leaf_nodes,
struct rte_tm_error *error)
{
/* Check node type (leaf or non-leaf) */
int
-rte_tm_node_type_get(uint8_t port_id,
+rte_tm_node_type_get(uint16_t port_id,
uint32_t node_id,
int *is_leaf,
struct rte_tm_error *error)
}
/* Get capabilities */
-int rte_tm_capabilities_get(uint8_t port_id,
+int rte_tm_capabilities_get(uint16_t port_id,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error)
{
}
/* Get level capabilities */
-int rte_tm_level_capabilities_get(uint8_t port_id,
+int rte_tm_level_capabilities_get(uint16_t port_id,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error)
}
/* Get node capabilities */
-int rte_tm_node_capabilities_get(uint8_t port_id,
+int rte_tm_node_capabilities_get(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_capabilities *cap,
struct rte_tm_error *error)
}
/* Add WRED profile */
-int rte_tm_wred_profile_add(uint8_t port_id,
+int rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_wred_params *profile,
struct rte_tm_error *error)
}
/* Delete WRED profile */
-int rte_tm_wred_profile_delete(uint8_t port_id,
+int rte_tm_wred_profile_delete(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
{
}
/* Add/update shared WRED context */
-int rte_tm_shared_wred_context_add_update(uint8_t port_id,
+int rte_tm_shared_wred_context_add_update(uint16_t port_id,
uint32_t shared_wred_context_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
}
/* Delete shared WRED context */
-int rte_tm_shared_wred_context_delete(uint8_t port_id,
+int rte_tm_shared_wred_context_delete(uint16_t port_id,
uint32_t shared_wred_context_id,
struct rte_tm_error *error)
{
}
/* Add shaper profile */
-int rte_tm_shaper_profile_add(uint8_t port_id,
+int rte_tm_shaper_profile_add(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_shaper_params *profile,
struct rte_tm_error *error)
}
/* Delete shaper profile */
-int rte_tm_shaper_profile_delete(uint8_t port_id,
+int rte_tm_shaper_profile_delete(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
{
}
/* Add shared shaper */
-int rte_tm_shared_shaper_add_update(uint8_t port_id,
+int rte_tm_shared_shaper_add_update(uint16_t port_id,
uint32_t shared_shaper_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
}
/* Delete shared shaper */
-int rte_tm_shared_shaper_delete(uint8_t port_id,
+int rte_tm_shared_shaper_delete(uint16_t port_id,
uint32_t shared_shaper_id,
struct rte_tm_error *error)
{
}
/* Add node to port traffic manager hierarchy */
-int rte_tm_node_add(uint8_t port_id,
+int rte_tm_node_add(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
}
/* Delete node from traffic manager hierarchy */
-int rte_tm_node_delete(uint8_t port_id,
+int rte_tm_node_delete(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
}
/* Suspend node */
-int rte_tm_node_suspend(uint8_t port_id,
+int rte_tm_node_suspend(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
}
/* Resume node */
-int rte_tm_node_resume(uint8_t port_id,
+int rte_tm_node_resume(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error)
{
}
/* Commit the initial port traffic manager hierarchy */
-int rte_tm_hierarchy_commit(uint8_t port_id,
+int rte_tm_hierarchy_commit(uint16_t port_id,
int clear_on_fail,
struct rte_tm_error *error)
{
}
/* Update node parent */
-int rte_tm_node_parent_update(uint8_t port_id,
+int rte_tm_node_parent_update(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
}
/* Update node private shaper */
-int rte_tm_node_shaper_update(uint8_t port_id,
+int rte_tm_node_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error)
}
/* Update node shared shapers */
-int rte_tm_node_shared_shaper_update(uint8_t port_id,
+int rte_tm_node_shared_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_shaper_id,
int add,
}
/* Update node stats */
-int rte_tm_node_stats_update(uint8_t port_id,
+int rte_tm_node_stats_update(uint16_t port_id,
uint32_t node_id,
uint64_t stats_mask,
struct rte_tm_error *error)
}
/* Update WFQ weight mode */
-int rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+int rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
uint32_t node_id,
int *wfq_weight_mode,
uint32_t n_sp_priorities,
}
/* Update node congestion management mode */
-int rte_tm_node_cman_update(uint8_t port_id,
+int rte_tm_node_cman_update(uint16_t port_id,
uint32_t node_id,
enum rte_tm_cman_mode cman,
struct rte_tm_error *error)
}
/* Update node private WRED context */
-int rte_tm_node_wred_context_update(uint8_t port_id,
+int rte_tm_node_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t wred_profile_id,
struct rte_tm_error *error)
}
/* Update node shared WRED context */
-int rte_tm_node_shared_wred_context_update(uint8_t port_id,
+int rte_tm_node_shared_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_wred_context_id,
int add,
}
/* Read and/or clear stats counters for specific node */
-int rte_tm_node_stats_read(uint8_t port_id,
+int rte_tm_node_stats_read(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_stats *stats,
uint64_t *stats_mask,
}
/* Packet marking - VLAN DEI */
-int rte_tm_mark_vlan_dei(uint8_t port_id,
+int rte_tm_mark_vlan_dei(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
}
/* Packet marking - IPv4/IPv6 ECN */
-int rte_tm_mark_ip_ecn(uint8_t port_id,
+int rte_tm_mark_ip_ecn(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
}
/* Packet marking - IPv4/IPv6 DSCP */
-int rte_tm_mark_ip_dscp(uint8_t port_id,
+int rte_tm_mark_ip_dscp(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_get_number_of_leaf_nodes(uint8_t port_id,
+rte_tm_get_number_of_leaf_nodes(uint16_t port_id,
uint32_t *n_leaf_nodes,
struct rte_tm_error *error);
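A sketch of the query with the new port type; tm_err is a local error struct:

/* Sketch: ask how many leaf nodes the port's TM hierarchy supports. */
uint32_t n_leaf = 0;
struct rte_tm_error tm_err;

if (rte_tm_get_number_of_leaf_nodes(port_id, &n_leaf, &tm_err) != 0)
	printf("TM query failed: %s\n",
			tm_err.message ? tm_err.message : "unknown");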
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_node_type_get(uint8_t port_id,
+rte_tm_node_type_get(uint16_t port_id,
uint32_t node_id,
int *is_leaf,
struct rte_tm_error *error);
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_capabilities_get(uint8_t port_id,
+rte_tm_capabilities_get(uint16_t port_id,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error);
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_level_capabilities_get(uint8_t port_id,
+rte_tm_level_capabilities_get(uint16_t port_id,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
struct rte_tm_error *error);
* 0 on success, non-zero error code otherwise.
*/
int
-rte_tm_node_capabilities_get(uint8_t port_id,
+rte_tm_node_capabilities_get(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_capabilities *cap,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_n_max
*/
int
-rte_tm_wred_profile_add(uint8_t port_id,
+rte_tm_wred_profile_add(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_wred_params *profile,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_n_max
*/
int
-rte_tm_wred_profile_delete(uint8_t port_id,
+rte_tm_wred_profile_delete(uint16_t port_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_shared_wred_context_add_update(uint8_t port_id,
+rte_tm_shared_wred_context_add_update(uint16_t port_id,
uint32_t shared_wred_context_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_shared_wred_context_delete(uint8_t port_id,
+rte_tm_shared_wred_context_delete(uint16_t port_id,
uint32_t shared_wred_context_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::shaper_n_max
*/
int
-rte_tm_shaper_profile_add(uint8_t port_id,
+rte_tm_shaper_profile_add(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_shaper_params *profile,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::shaper_n_max
*/
int
-rte_tm_shaper_profile_delete(uint8_t port_id,
+rte_tm_shaper_profile_delete(uint16_t port_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
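A sketch of profile creation under the widened API; the rate, burst and adjust values below are illustrative only:

/* Sketch: register shaper profile 0 with a 1 Gbps committed rate. */
struct rte_tm_shaper_params sp = {
	.committed = { .rate = 1000000000 / 8, .size = 4096 }, /* B/s, B */
	.peak = { .rate = 0, .size = 0 },
	.pkt_length_adjust = 24, /* per-packet framing overhead */
};
struct rte_tm_error tm_err;

int ret = rte_tm_shaper_profile_add(port_id, 0, &sp, &tm_err);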
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_shared_shaper_add_update(uint8_t port_id,
+rte_tm_shared_shaper_add_update(uint16_t port_id,
uint32_t shared_shaper_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_shared_shaper_delete(uint8_t port_id,
+rte_tm_shared_shaper_delete(uint16_t port_id,
uint32_t shared_shaper_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities
*/
int
-rte_tm_node_add(uint8_t port_id,
+rte_tm_node_add(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
* @see RTE_TM_UPDATE_NODE_ADD_DELETE
*/
int
-rte_tm_node_delete(uint8_t port_id,
+rte_tm_node_delete(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
* @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
*/
int
-rte_tm_node_suspend(uint8_t port_id,
+rte_tm_node_suspend(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
* @see RTE_TM_UPDATE_NODE_SUSPEND_RESUME
*/
int
-rte_tm_node_resume(uint8_t port_id,
+rte_tm_node_resume(uint16_t port_id,
uint32_t node_id,
struct rte_tm_error *error);
* @see rte_tm_node_delete()
*/
int
-rte_tm_hierarchy_commit(uint8_t port_id,
+rte_tm_hierarchy_commit(uint16_t port_id,
int clear_on_fail,
struct rte_tm_error *error);
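A sketch of the node-add/commit sequence; node id 0 and the defaults below are illustrative:

/* Sketch: create a root node with no private shaper, then commit,
 * asking the PMD to clear the hierarchy if the commit fails. */
struct rte_tm_node_params np = {
	.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
	.n_shared_shapers = 0,
};
struct rte_tm_error tm_err;
int ret = rte_tm_node_add(port_id, 0, RTE_TM_NODE_ID_NULL,
		0 /* priority */, 1 /* weight */,
		RTE_TM_NODE_LEVEL_ID_ANY, &np, &tm_err);

if (ret == 0)
	ret = rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &tm_err);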
* @see RTE_TM_UPDATE_NODE_PARENT_CHANGE_LEVEL
*/
int
-rte_tm_node_parent_update(uint8_t port_id,
+rte_tm_node_parent_update(uint16_t port_id,
uint32_t node_id,
uint32_t parent_node_id,
uint32_t priority,
* @see struct rte_tm_capabilities::shaper_private_n_max
*/
int
-rte_tm_node_shaper_update(uint8_t port_id,
+rte_tm_node_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::shaper_shared_n_max
*/
int
-rte_tm_node_shared_shaper_update(uint8_t port_id,
+rte_tm_node_shared_shaper_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_shaper_id,
int add,
* @see RTE_TM_UPDATE_NODE_STATS
*/
int
-rte_tm_node_stats_update(uint8_t port_id,
+rte_tm_node_stats_update(uint16_t port_id,
uint32_t node_id,
uint64_t stats_mask,
struct rte_tm_error *error);
* @see RTE_TM_UPDATE_NODE_N_SP_PRIORITIES
*/
int
-rte_tm_node_wfq_weight_mode_update(uint8_t port_id,
+rte_tm_node_wfq_weight_mode_update(uint16_t port_id,
uint32_t node_id,
int *wfq_weight_mode,
uint32_t n_sp_priorities,
* @see RTE_TM_UPDATE_NODE_CMAN
*/
int
-rte_tm_node_cman_update(uint8_t port_id,
+rte_tm_node_cman_update(uint16_t port_id,
uint32_t node_id,
enum rte_tm_cman_mode cman,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_private_n_max
*/
int
-rte_tm_node_wred_context_update(uint8_t port_id,
+rte_tm_node_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t wred_profile_id,
struct rte_tm_error *error);
* @see struct rte_tm_capabilities::cman_wred_context_shared_n_max
*/
int
-rte_tm_node_shared_wred_context_update(uint8_t port_id,
+rte_tm_node_shared_wred_context_update(uint16_t port_id,
uint32_t node_id,
uint32_t shared_wred_context_id,
int add,
* @see enum rte_tm_stats_type
*/
int
-rte_tm_node_stats_read(uint8_t port_id,
+rte_tm_node_stats_read(uint16_t port_id,
uint32_t node_id,
struct rte_tm_node_stats *stats,
uint64_t *stats_mask,
* @see struct rte_tm_capabilities::mark_vlan_dei_supported
*/
int
-rte_tm_mark_vlan_dei(uint8_t port_id,
+rte_tm_mark_vlan_dei(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
* @see struct rte_tm_capabilities::mark_ip_ecn_sctp_supported
*/
int
-rte_tm_mark_ip_ecn(uint8_t port_id,
+rte_tm_mark_ip_ecn(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
* @see struct rte_tm_capabilities::mark_ip_dscp_supported
*/
int
-rte_tm_mark_ip_dscp(uint8_t port_id,
+rte_tm_mark_ip_dscp(uint16_t port_id,
int mark_green,
int mark_yellow,
int mark_red,
* success, NULL otherwise.
*/
const struct rte_tm_ops *
-rte_tm_ops_get(uint8_t port_id, struct rte_tm_error *error);
+rte_tm_ops_get(uint16_t port_id, struct rte_tm_error *error);
#ifdef __cplusplus
}
* Structure which has the function pointers for KNI interface.
*/
struct rte_kni_ops {
- uint8_t port_id; /* Port ID */
+ uint16_t port_id; /* Port ID */
/* Pointer to function of changing MTU */
- int (*change_mtu)(uint8_t port_id, unsigned new_mtu);
+ int (*change_mtu)(uint16_t port_id, unsigned int new_mtu);
/* Pointer to function of configuring network interface */
- int (*config_network_if)(uint8_t port_id, uint8_t if_up);
+ int (*config_network_if)(uint16_t port_id, uint8_t if_up);
};
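A sketch of an application filling the updated ops structure; the two callbacks are assumed to be defined elsewhere in the application:

/* Sketch: wire the widened callback signatures into rte_kni_ops. */
static int my_change_mtu(uint16_t port_id, unsigned int new_mtu);
static int my_config_network_if(uint16_t port_id, uint8_t if_up);

struct rte_kni_ops ops = {
	.port_id = port_id,
	.change_mtu = my_change_mtu,
	.config_network_if = my_config_network_if,
};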
/**
}
static uint16_t
-add_time_stamps(uint8_t pid __rte_unused,
+add_time_stamps(uint16_t pid __rte_unused,
uint16_t qid __rte_unused,
struct rte_mbuf **pkts,
uint16_t nb_pkts,
}
static uint16_t
-calc_latency(uint8_t pid __rte_unused,
+calc_latency(uint16_t pid __rte_unused,
uint16_t qid __rte_unused,
struct rte_mbuf **pkts,
uint16_t nb_pkts,
rte_latency_stats_flow_type_fn user_cb)
{
unsigned int i;
- uint8_t pid;
+ uint16_t pid;
uint16_t qid;
struct rxtx_cbs *cbs = NULL;
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const char *ptr_strings[NUM_LATENCY_STATS] = {0};
const struct rte_memzone *mz = NULL;
const unsigned int flags = 0;
int
rte_latencystats_uninit(void)
{
- uint8_t pid;
+ uint16_t pid;
uint16_t qid;
int ret = 0;
struct rxtx_cbs *cbs = NULL;
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
/** Deregister Rx/Tx callbacks */
for (pid = 0; pid < nb_ports; pid++) {
EXPORT_MAP := rte_pdump_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PDUMP) := rte_pdump.c
}
static uint16_t
-pdump_rx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+pdump_rx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_params)
}
static uint16_t
-pdump_tx(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+pdump_tx(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
{
pdump_copy(pkts, nb_pkts, user_params);
}
static int
-pdump_regitser_rx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
}
static int
-pdump_regitser_tx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
+pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
set_pdump_rxtx_cbs(struct pdump_request *p)
{
uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
- uint8_t port;
+ uint16_t port;
int ret = 0;
uint32_t flags;
uint16_t operation;
}
static int
-pdump_validate_port(uint8_t port, char *name)
+pdump_validate_port(uint16_t port, char *name)
{
int ret = 0;
}
int
-rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
struct rte_ring *ring,
struct rte_mempool *mp,
void *filter)
}
int
-rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags)
+rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags)
{
int ret = 0;
char name[DEVICE_ID_SIZE];
*/
int
-rte_pdump_enable(uint8_t port, uint16_t queue, uint32_t flags,
+rte_pdump_enable(uint16_t port, uint16_t queue, uint32_t flags,
struct rte_ring *ring,
struct rte_mempool *mp,
void *filter);
*/
int
-rte_pdump_disable(uint8_t port, uint16_t queue, uint32_t flags);
+rte_pdump_disable(uint16_t port, uint16_t queue, uint32_t flags);
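A sketch of enable/disable pairing with the new port type, assuming ring and pool were created beforehand:

/* Sketch: mirror RX traffic from queue 0 into a ring for a capture
 * process, then tear the callback down again. */
int ret = rte_pdump_enable(port_id, 0, RTE_PDUMP_FLAG_RX,
		ring, pool, NULL);
/* ... capture runs ... */
if (ret == 0)
	rte_pdump_disable(port_id, 0, RTE_PDUMP_FLAG_RX);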
/**
 * Enables packet capturing on a given device id and queue.
struct rte_port_in_stats stats;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
uint16_t tx_buf_count;
uint64_t bsz_mask;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
uint64_t bsz_mask;
uint64_t n_retries;
uint16_t queue_id;
- uint8_t port_id;
+ uint16_t port_id;
};
static void *
/** ethdev_reader port parameters */
struct rte_port_ethdev_reader_params {
/** NIC RX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC RX queue ID */
uint16_t queue_id;
/** ethdev_writer port parameters */
struct rte_port_ethdev_writer_params {
/** NIC TX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC TX queue ID */
uint16_t queue_id;
/** ethdev_writer_nodrop port parameters */
struct rte_port_ethdev_writer_nodrop_params {
/** NIC TX port ID */
- uint8_t port_id;
+ uint16_t port_id;
/** NIC TX queue ID */
uint16_t queue_id;
}
/* Callback for request of changing MTU */
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
printf("Change MTU of port %d to %u\n", port_id, new_mtu);
kni_pkt_mtu = new_mtu;
}
static int
-test_kni_processing(uint8_t port_id, struct rte_mempool *mp)
+test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
{
int ret = 0;
unsigned i;
conf.core_id = 1;
conf.force_bind = 1;
conf.mbuf_size = MAX_PACKET_SZ;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
ops = kni_ops;
ops.port_id = port_id;
test_kni(void)
{
int ret = -1;
- uint8_t nb_ports, port_id;
+ uint16_t nb_ports, port_id;
struct rte_kni *kni;
struct rte_mempool *mp;
struct rte_kni_conf conf;
rte_eth_dev_info_get(port_id, &info);
conf.addr = info.pci_dev->addr;
conf.id = info.pci_dev->id;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
rte_eth_dev_info_get(port_id, &info);
conf.addr = info.pci_dev->addr;
conf.id = info.pci_dev->id;
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
ops = kni_ops;
uint8_t bonded_mac[] = {0xAA, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF };
struct link_bonding_unittest_params {
- int8_t bonded_port_id;
- int8_t slave_port_ids[TEST_MAX_NUMBER_OF_PORTS];
- uint8_t bonded_slave_count;
+ int16_t bonded_port_id;
+ int16_t slave_port_ids[TEST_MAX_NUMBER_OF_PORTS];
+ uint16_t bonded_slave_count;
uint8_t bonding_mode;
uint16_t nb_rx_q;
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* Don't try to recreate bonded device if re-running test suite*/
if (test_params->bonded_port_id == -1) {
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(rte_eth_bond_slave_add(test_params->bonded_port_id,
test_params->slave_port_ids[test_params->bonded_slave_count]),
{
int current_slave_count;
struct ether_addr read_mac_addr, *mac_addr;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(rte_eth_bond_slave_remove(test_params->bonded_port_id,
test_params->slave_port_ids[test_params->bonded_slave_count-1]),
test_add_already_bonded_slave_to_bonded_device(void)
{
int port_id, current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
char pmd_name[RTE_ETH_NAME_MAX_LEN];
test_add_slave_to_bonded_device();
test_get_slaves_from_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(),
"Failed to add slave to bonded device");
struct rte_eth_link link_status;
int current_slave_count, current_bonding_mode, primary_port;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* Add slave to bonded device*/
TEST_ASSERT_SUCCESS(test_add_slave_to_bonded_device(),
test_stop_bonded_device(void)
{
int current_slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
struct rte_eth_link link_status;
{
int i, slave_count, bonded_port_id;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int slave_port_ids[BONDED_INIT_MAC_ASSIGNMENT_SLAVE_COUNT];
struct ether_addr slave_mac_addr, bonded_mac_addr, read_mac_addr;
static int
initialize_bonded_device_with_slaves(uint8_t bonding_mode, uint8_t bond_en_isr,
- uint8_t number_of_slaves, uint8_t enable_slave)
+ uint16_t number_of_slaves, uint8_t enable_slave)
{
/* Configure bonded device */
TEST_ASSERT_SUCCESS(configure_ethdev(test_params->bonded_port_id, 0,
static int
-test_bonding_lsc_event_callback(uint8_t port_id __rte_unused,
+test_bonding_lsc_event_callback(uint16_t port_id __rte_unused,
enum rte_eth_event_type type __rte_unused,
void *param __rte_unused,
void *ret_param __rte_unused)
test_status_interrupt(void)
{
int slave_count;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
/* initialized bonding device with T slaves */
TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
static int
generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size,
uint8_t vlan, uint8_t ipv4, uint8_t toggle_dst_mac,
- uint8_t toggle_ip_addr, uint8_t toggle_udp_port)
+ uint8_t toggle_ip_addr, uint16_t toggle_udp_port)
{
uint16_t pktlen, generated_burst_size, ether_type;
void *ip_hdr;
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count, primary_port;
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count;
struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST] = { NULL };
struct rte_eth_stats port_stats;
- uint8_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t slaves[RTE_MAX_ETHPORTS];
int i, burst_size, slave_count, primary_port;
}
static void
-lacp_recv_cb(uint8_t slave_id, struct rte_mbuf *lacp_pkt)
+lacp_recv_cb(uint16_t slave_id, struct rte_mbuf *lacp_pkt)
{
struct ether_hdr *hdr;
struct slow_protocol_frame *slow_hdr;
}
static int
-initialize_bonded_device_with_slaves(uint8_t slave_count, uint8_t external_sm)
+initialize_bonded_device_with_slaves(uint16_t slave_count, uint8_t external_sm)
{
uint8_t i;
{
struct slave_conf *slave;
int retval;
- uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t i;
+ uint16_t slaves[RTE_MAX_ETHPORTS];
+ uint16_t i;
rte_eth_dev_stop(test_params.bonded_port_id);
char name[RTE_ETH_NAME_MAX_LEN];
struct slave_conf *port;
const uint8_t socket_id = rte_socket_id();
- uint8_t i;
+ uint16_t i;
if (test_params.mbuf_pool == NULL) {
nb_mbuf_per_pool = TEST_RX_DESC_MAX + DEF_PKT_BURST +
{
struct slave_conf *port;
uint8_t i, env_state;
- uint8_t slaves[RTE_DIM(test_params.slave_ports)];
+ uint16_t slaves[RTE_DIM(test_params.slave_ports)];
int slaves_count;
env_state = 0;
#define MAX_PKT_BURST (32)
#define RTE_TEST_RX_DESC_DEFAULT (128)
#define RTE_TEST_TX_DESC_DEFAULT (512)
-#define RTE_PORT_ALL (~(uint8_t)0x0)
+#define RTE_PORT_ALL (~(uint16_t)0x0)
/* how long test would take at full line rate */
#define RTE_TEST_DURATION (2)
uint8_t status;
uint8_t socketid;
uint16_t nb_ports;
- uint8_t portlist[RTE_MAX_ETHPORTS];
+ uint16_t portlist[RTE_MAX_ETHPORTS];
} __rte_cache_aligned;
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Check the link status of all ports in up to 3s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 30 /* 3s (30 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("Checking link statuses...\n");
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status) {
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port %d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
if (link_mbps == 0)
link_mbps = link.link_speed;
} else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
}
static void
-stats_display(uint8_t port_id)
+stats_display(uint16_t port_id)
{
struct rte_eth_stats stats;
rte_eth_stats_get(port_id, &stats);
while (likely(!stop)) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
portid = conf->portlist[i];
cur_tsc = rte_rdtsc();
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
while (likely(!stop)) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
idle++;
portid = conf->portlist[i];
int nb_free = pkt_per_port;
do { /* dry out */
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
nb_tx = 0;
while (nb_tx < nb_rx)
while (total) {
for (i = 0; i < conf->nb_ports; i++) {
portid = conf->portlist[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
&pkts_burst[next[portid]],
MAX_PKT_BURST);
if (unlikely(nb_rx == 0)) {
/* The ring structure used for tests */
static struct rte_ring *r;
-static uint8_t ring_ethdev_port;
+static uint16_t ring_ethdev_port;
/* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
static void
void
-virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
}
void
-virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
}
void
-virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
}
void
-virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
}
void
-virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
struct virtual_ethdev_private *dev_private = dev->data->dev_private;
void
-virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
void
-virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
+virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
struct virtual_ethdev_private *dev_private = NULL;
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
}
void
-virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
uint8_t packet_fail_count)
{
struct virtual_ethdev_private *dev_private = NULL;
}
void
-virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
+virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
}
void
-virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
+virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
uint8_t link_status)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
}
int
-virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
+virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length)
{
struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
}
int
-virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
+virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length)
{
struct virtual_ethdev_private *dev_private;
uint8_t socket_id, uint8_t isr_support);
void
-virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status);
+virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status);
void
-virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
+virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
uint8_t link_status);
int
-virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
+virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
struct rte_mbuf **pkts_burst, int burst_length);
int
-virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
+virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
struct rte_mbuf **pkt_burst, int burst_length);
/** Control methods for the dev_ops function pointers, used to control the
 * behavior of the virtual PMD */
void
-virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_stop_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_stop_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id,
+ uint8_t success);
void
-virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id,
+ uint8_t success);
void
-virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success);
void
-virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success);
+virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success);
/* If a value greater than zero is set for packet_fail_count then the virtual
 * device tx burst function will fail that many packets from a burst, or all
 * packets if packet_fail_count is greater than the number of packets in the
 * burst. */
void
-virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
uint8_t packet_fail_count);
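A sketch of typical test usage for these hooks; slave_port_id is assumed to be a virtual PMD port under test:

/* Sketch: fail part of a TX burst on a virtual slave, then signal
 * link-down to exercise the bonding failover path. */
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(slave_port_id, 16);
virtual_ethdev_simulate_link_status_interrupt(slave_port_id, 0 /* down */);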
#ifdef __cplusplus