* handled by rte_intr_rx_ctl().
*/
eth_dev->intr_handle = &priv->intr_handle;
- priv->dev = eth_dev;
+ priv->dev_data = eth_dev->data;
eth_dev->dev_ops = &mlx4_dev_ops;
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
- mlx4_dev_set_link_up(priv->dev);
+ mlx4_dev_set_link_up(eth_dev);
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
struct mlx4_priv {
LIST_ENTRY(mlx4_priv) mem_event_cb;
/**< Called by memory event callback. */
- struct rte_eth_dev *dev; /**< Ethernet device. */
+ struct rte_eth_dev_data *dev_data; /**< Pointer to device data. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
struct ibv_pd *pd; /**< Protection Domain. */
/**< Configured MAC addresses. Unused entries are zeroed. */
};
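+/*
+ * With the rte_eth_dev pointer gone from the private structure, recover
+ * it from the port ID kept in the shared device data: PORT_ID() yields
+ * the port ID, ETH_DEV() the corresponding per-process device.
+ */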
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+
/* mlx4_ethdev.c */
int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
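For reference, a minimal sketch of what the two macros resolve to, assuming
the rte_eth_devices[] array declared by the ethdev driver headers; the
wrapper function below is hypothetical and only illustrates the lookup:

	static struct rte_eth_dev *
	mlx4_priv_to_eth_dev(const struct mlx4_priv *priv)
	{
		/* The port ID lives in the shared device data, while
		 * rte_eth_devices[] belongs to the calling process, so
		 * ETH_DEV() always yields this process's own device. */
		return &rte_eth_devices[PORT_ID(priv)];
	}

Every former priv->dev access below is rewritten this way: the private area
keeps only dev_data, and the device pointer is recomputed on demand.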
if (flow->rss)
break;
queue = action->conf;
- if (queue->index >= priv->dev->data->nb_rx_queues) {
+ if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
msg = "queue target index beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
/* Sanity checks. */
for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
- priv->dev->data->nb_rx_queues)
+ ETH_DEV(priv)->data->nb_rx_queues)
break;
if (i != rss->queue_num) {
msg = "queue index target beyond number of"
/* Stop at the first nonexistent target queue. */
for (i = 0; i != rss->queues; ++i)
if (rss->queue_id[i] >=
- priv->dev->data->nb_rx_queues ||
- !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+ ETH_DEV(priv)->data->nb_rx_queues ||
+ !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
missing = 1;
break;
}
mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
{
while (vlan < 4096) {
- if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+ if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
(UINT64_C(1) << (vlan % 64)))
return vlan;
++vlan;
* get RSS by default.
*/
uint32_t queues =
- rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+ rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
uint16_t queue[queues];
struct rte_flow_action_rss action_rss = {
.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
};
struct ether_addr *rule_mac = &eth_spec.dst;
rte_be16_t *rule_vlan =
- (priv->dev->data->dev_conf.rxmode.offloads &
+ (ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER) &&
- !priv->dev->data->promiscuous ?
+ !ETH_DEV(priv)->data->promiscuous ?
&vlan_spec.tci :
NULL;
uint16_t vlan = 0;
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
memcpy(rule_mac, mac, sizeof(*mac));
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
goto next_vlan;
}
/* Take care of promiscuous and all multicast flow rules. */
- if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
+ if (ETH_DEV(priv)->data->promiscuous ||
+ ETH_DEV(priv)->data->all_multicast) {
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_NEXT(flow, next)) {
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
if (flow->promisc)
break;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
if (flow->allmulti)
break;
}
}
if (!flow || !flow->internal) {
/* Not found, create a new flow rule. */
- if (priv->dev->data->promiscuous) {
+ if (ETH_DEV(priv)->data->promiscuous) {
pattern[1].spec = NULL;
pattern[1].mask = NULL;
} else {
- assert(priv->dev->data->all_multicast);
+ assert(ETH_DEV(priv)->data->all_multicast);
pattern[1].spec = &eth_allmulti;
pattern[1].mask = &eth_allmulti;
}
pattern[2] = pattern[3];
- flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
actions, error);
if (!flow) {
err = -rte_errno;
struct rte_flow *next = LIST_NEXT(flow, next);
if (!flow->select)
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
else
flow->select = 0;
flow = next;
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
- claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+ error));
} else {
/* Refresh internal rules. */
ret = mlx4_flow_internal(priv, error);
struct rte_flow *flow;
while ((flow = LIST_FIRST(&priv->flows)))
- mlx4_flow_destroy(priv->dev, flow, NULL);
+ mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
assert(LIST_EMPTY(&priv->rss));
}
mlx4_rx_intr_vec_enable(struct mlx4_priv *priv)
{
unsigned int i;
- unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
+ unsigned int rxqs_n = ETH_DEV(priv)->data->nb_rx_queues;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = &priv->intr_handle;
return -rte_errno;
}
for (i = 0; i != n; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
if (!rxq || !rxq->channel) {
mlx4_link_status_alarm(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
assert(priv->intr_alarm == 1);
priv->intr_alarm = 0;
if (intr_conf->lsc && !mlx4_link_status_check(priv))
- _rte_eth_dev_callback_process(priv->dev,
+ _rte_eth_dev_callback_process(ETH_DEV(priv),
RTE_ETH_EVENT_INTR_LSC,
NULL);
}
static int
mlx4_link_status_check(struct mlx4_priv *priv)
{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
- int ret = mlx4_link_update(priv->dev, 0);
+ struct rte_eth_link *link = &ETH_DEV(priv)->data->dev_link;
+ int ret = mlx4_link_update(ETH_DEV(priv), 0);
if (ret)
return ret;
uint32_t caught[RTE_DIM(type)] = { 0 };
struct ibv_async_event event;
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
unsigned int i;
/* Read all messages and acknowledge them. */
}
for (i = 0; i != RTE_DIM(caught); ++i)
if (caught[i])
- _rte_eth_dev_callback_process(priv->dev, type[i],
+ _rte_eth_dev_callback_process(ETH_DEV(priv), type[i],
NULL);
}
mlx4_intr_install(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
int rc;
mlx4_intr_uninstall(priv);
mlx4_rxq_intr_enable(struct mlx4_priv *priv)
{
const struct rte_intr_conf *const intr_conf =
- &priv->dev->data->dev_conf.intr_conf;
+ &ETH_DEV(priv)->data->dev_conf.intr_conf;
if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
goto error;
rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
/* Iterate all the existing mlx4 devices. */
LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
- mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
struct mlx4_priv *priv = txq->priv;
- mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
+ mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx4_tx_addr2mr_bh(txq, addr);
}
struct ibv_wq *ind_tbl[rss->queues];
struct mlx4_priv *priv = rss->priv;
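+ /* Resolve the per-process device once for the queue lookups below. */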
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const char *msg;
unsigned int i = 0;
int ret;
uint16_t id = rss->queue_id[i];
struct rxq *rxq = NULL;
- if (id < priv->dev->data->nb_rx_queues)
- rxq = priv->dev->data->rx_queues[id];
+ if (id < dev->data->nb_rx_queues)
+ rxq = dev->data->rx_queues[id];
if (!rxq) {
ret = EINVAL;
msg = "RSS target queue is not configured";
rss->ind = NULL;
}
while (i--)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
ERROR("mlx4: %s", msg);
--rss->usecnt;
rte_errno = ret;
mlx4_rss_detach(struct mlx4_rss *rss)
{
struct mlx4_priv *priv = rss->priv;
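+ /* Resolve the per-process device once for the queue detach loop. */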
+ struct rte_eth_dev *dev = ETH_DEV(priv);
unsigned int i;
assert(rss->refcnt);
claim_zero(mlx4_glue->destroy_rwq_ind_table(rss->ind));
rss->ind = NULL;
for (i = 0; i != rss->queues; ++i)
- mlx4_rxq_detach(priv->dev->data->rx_queues[rss->queue_id[i]]);
+ mlx4_rxq_detach(dev->data->rx_queues[rss->queue_id[i]]);
}
/**
int
mlx4_rss_init(struct mlx4_priv *priv)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
uint8_t log2_range = rte_log2_u32(dev->data->nb_rx_queues);
uint32_t wq_num_prev = 0;
const char *msg;
if (priv->rss_init)
return 0;
- if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ if (ETH_DEV(priv)->data->nb_rx_queues > priv->hw_rss_max_qps) {
ERROR("RSS does not support more than %d queues",
priv->hw_rss_max_qps);
rte_errno = EINVAL;
rte_errno = ret;
return -ret;
}
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
struct ibv_cq *cq;
struct ibv_wq *wq;
uint32_t wq_num;
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
i, msg, strerror(ret));
while (i--) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq)
mlx4_rxq_detach(rxq);
if (!priv->rss_init)
return;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
- struct rxq *rxq = priv->dev->data->rx_queues[i];
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i) {
+ struct rxq *rxq = ETH_DEV(priv)->data->rx_queues[i];
if (rxq) {
assert(rxq->usecnt == 1);
}
struct mlx4_priv *priv = rxq->priv;
- struct rte_eth_dev *dev = priv->dev;
+ struct rte_eth_dev *dev = ETH_DEV(priv);
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
}
/* Pre-register Rx mempool. */
DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
- priv->dev->data->port_id, rxq->stats.idx,
+ ETH_DEV(priv)->data->port_id, rxq->stats.idx,
rxq->mp->name, rxq->mp->nb_mem_chunks);
mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
if (rxq == NULL)
return;
priv = rxq->priv;
- for (i = 0; i != priv->dev->data->nb_rx_queues; ++i)
- if (priv->dev->data->rx_queues[i] == rxq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
+ if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
DEBUG("%p: removing Rx queue %p from list",
- (void *)priv->dev, (void *)rxq);
- priv->dev->data->rx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)rxq);
+ ETH_DEV(priv)->data->rx_queues[i] = NULL;
break;
}
assert(!rxq->cq);
if (txq == NULL)
return;
priv = txq->priv;
- for (i = 0; i != priv->dev->data->nb_tx_queues; ++i)
- if (priv->dev->data->tx_queues[i] == txq) {
+ for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
+ if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
DEBUG("%p: removing Tx queue %p from list",
- (void *)priv->dev, (void *)txq);
- priv->dev->data->tx_queues[i] = NULL;
+ (void *)ETH_DEV(priv), (void *)txq);
+ ETH_DEV(priv)->data->tx_queues[i] = NULL;
break;
}
mlx4_txq_free_elts(txq);