static struct rte_eth_dev *
eth_dev_allocated(const char *name)
{
- unsigned i;
+ uint16_t i;
+
+ RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (rte_eth_devices[i].data != NULL &&
static uint16_t
eth_dev_find_free_port(void)
{
- unsigned i;
+ uint16_t i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
/* Using shared name field to find a free port. */
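The uint16_t counters introduced above rely on the new compile-time guard: RTE_BUILD_BUG_ON() fails the build whenever its condition is true, so RTE_MAX_ETHPORTS is guaranteed to stay below UINT16_MAX and a 16-bit index can always reach the loop bound without wrapping. A minimal standalone sketch of the same pattern, with a hypothetical MAX_PORTS standing in for RTE_MAX_ETHPORTS:

    #include <stdint.h>
    #include <rte_common.h>   /* RTE_BUILD_BUG_ON() */

    #define MAX_PORTS 32      /* hypothetical stand-in for RTE_MAX_ETHPORTS */

    static int
    find_free_slot(const int *in_use)
    {
        uint16_t i;

        /* Compilation fails if MAX_PORTS ever grows past what uint16_t can count. */
        RTE_BUILD_BUG_ON(MAX_PORTS >= UINT16_MAX);

        for (i = 0; i < MAX_PORTS; i++)
            if (!in_use[i])
                return i;
        return -1;
    }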
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
- uint32_t pid;
+ uint16_t pid;
if (name == NULL) {
RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
return -EINVAL;
}
} else {
- const struct rte_eth_rxseg_split *rx_seg =
- (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
- uint16_t n_seg = rx_conf->rx_nseg;
+ const struct rte_eth_rxseg_split *rx_seg;
+ uint16_t n_seg;
/* Extended multi-segment configuration check. */
if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
"Memory pool is null and no extended configuration provided\n");
return -EINVAL;
}
+
+ rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
+ n_seg = rx_conf->rx_nseg;
+
if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
&mbp_buf_size,
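The point of this hunk is ordering: before the change, rx_conf->rx_seg and rx_conf->rx_nseg were read in the initializers while rx_conf itself was only checked for NULL afterwards; now the assignments follow the extended-configuration check, so the pointer is dereferenced only once it is known to be valid. The same check-before-dereference pattern in isolation, with hypothetical names:

    /* Hypothetical sketch: validate the configuration pointer before reading it. */
    if (conf == NULL || conf->seg == NULL || conf->nb_seg == 0)
        return -EINVAL;

    seg = conf->seg;          /* safe: conf already validated above */
    nb_seg = conf->nb_seg;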
RTE_INIT(eth_dev_init_cb_lists)
{
- int i;
+ uint16_t i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
{
struct rte_eth_dev *dev;
struct rte_eth_dev_callback *user_cb;
- uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+ uint16_t next_port;
uint16_t last_port;
if (!cb_fn)
int ret;
struct rte_eth_dev *dev;
struct rte_eth_dev_callback *cb, *next;
- uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
+ uint16_t next_port;
uint16_t last_port;
if (!cb_fn)
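Dropping the 32-bit wrap-around workaround in both callback functions is safe for the same reason as the other conversions: the loops advance next_port up to and including last_port, last_port is at most RTE_MAX_ETHPORTS - 1, and the RTE_BUILD_BUG_ON() added earlier keeps RTE_MAX_ETHPORTS below UINT16_MAX, so the final increment cannot wrap a uint16_t back to zero. A small self-contained check of that worst case, using the largest value the guard still allows:

    #include <assert.h>
    #include <stdint.h>

    static void
    check_no_wrap(void)
    {
        /* Hypothetical worst case: RTE_MAX_ETHPORTS == UINT16_MAX - 1. */
        uint16_t last_port = (UINT16_MAX - 1) - 1;   /* RTE_MAX_ETHPORTS - 1 */
        uint16_t next_port = last_port;

        ++next_port;                     /* final increment of the iteration loop */
        assert(next_port > last_port);   /* loop terminates; no wrap to zero */
    }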
return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}
-int
-rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (l2_tunnel == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
- return -EINVAL;
- }
-
- if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
- -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
- l2_tunnel));
-}
-
-int
-rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
- uint32_t mask,
- uint8_t en)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- if (l2_tunnel == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
- return -EINVAL;
- }
-
- if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
- return -EINVAL;
- }
-
- if (mask == 0) {
- RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
- return -EINVAL;
- }
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
- -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
- l2_tunnel, mask, en));
-}
-
static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
const struct rte_eth_desc_lim *desc_lim)
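The two rte_eth_dev_l2_tunnel_*() functions removed above were the legacy entry points for L2 tunnel (E-Tag) ethertype and offload configuration; the deprecation direction for that functionality points at the generic rte_flow API instead. A rough, hedged sketch of that direction only, with a hypothetical queue index and an intentionally empty E-Tag spec (none of this comes from the patch itself):

    #include <rte_flow.h>

    /* Illustrative only: steer any 802.1BR E-Tag traffic to Rx queue 0. */
    static struct rte_flow *
    etag_to_queue0(uint16_t port_id, struct rte_flow_error *error)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_E_TAG },   /* no spec: match any E-Tag */
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
    }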
int
rte_eth_switch_domain_alloc(uint16_t *domain_id)
{
- unsigned int i;
+ uint16_t i;
*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;