* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
mlx4_proc_priv_init(struct rte_eth_dev *dev)
{
struct mlx4_proc_priv *ppriv;
*/
ppriv_size = sizeof(struct mlx4_proc_priv) +
dev->data->nb_tx_queues * sizeof(void *);
- ppriv = rte_malloc_socket("mlx4_proc_priv", ppriv_size,
- RTE_CACHE_LINE_SIZE, dev->device->numa_node);
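+ /* Zero the allocation so that UAR table entries that never get mapped
+ * stay NULL and can be skipped safely on release.
+ */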
+ ppriv = rte_zmalloc_socket("mlx4_proc_priv", ppriv_size,
+ RTE_CACHE_LINE_SIZE, dev->device->numa_node);
if (!ppriv) {
rte_errno = ENOMEM;
return -rte_errno;
}
- ppriv->uar_table_sz = ppriv_size;
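+ /* Store the number of UAR table entries (one per Tx queue) rather than
+ * the allocation size in bytes; secondary processes compare this against
+ * the current Tx queue count.
+ */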
+ ppriv->uar_table_sz = dev->data->nb_tx_queues;
dev->process_private = ppriv;
return 0;
}
* @param dev
* Pointer to Ethernet device structure.
*/
-static void
+void
mlx4_proc_priv_uninit(struct rte_eth_dev *dev)
{
if (!dev->process_private)
#define PORT_ID(priv) ((priv)->dev_data->port_id)
#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
+int mlx4_proc_priv_init(struct rte_eth_dev *dev);
+void mlx4_proc_priv_uninit(struct rte_eth_dev *dev);
+
/* mlx4_ethdev.c */
int mlx4_get_ifname(const struct mlx4_priv *priv, char (*ifname)[IF_NAMESIZE]);
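(For context: judging from the size computation in mlx4_proc_priv_init() and the
uar_table accesses in this patch, the per-process structure involved is declared
in mlx4.h roughly as below; sketch only, field comments approximate.)

struct mlx4_proc_priv {
	size_t uar_table_sz;
	/* Number of entries in the UAR register table. */
	void *uar_table[];
	/* Table of UAR registers, one per configured Tx queue. */
};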
const struct mlx4_mp_param *param =
(const struct mlx4_mp_param *)mp_msg->param;
struct rte_eth_dev *dev;
+#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
+ struct mlx4_proc_priv *ppriv;
+#endif
int ret;
MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
rte_mb();
dev->tx_pkt_burst = mlx4_tx_burst;
dev->rx_pkt_burst = mlx4_rx_burst;
+#ifdef HAVE_IBV_MLX4_UAR_MMAP_OFFSET
+ ppriv = (struct mlx4_proc_priv *)dev->process_private;
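+ /* The primary may have reconfigured the number of Tx queues since this
+ * process private data was built; if the UAR table no longer matches,
+ * release the stale mappings and rebuild it using the fd passed with
+ * the request.
+ */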
+ if (ppriv->uar_table_sz != dev->data->nb_tx_queues) {
+ mlx4_tx_uar_uninit_secondary(dev);
+ mlx4_proc_priv_uninit(dev);
+ ret = mlx4_proc_priv_init(dev);
+ if (ret)
+ return -rte_errno;
+ ret = mlx4_tx_uar_init_secondary(dev, mp_msg->fds[0]);
+ if (ret) {
+ mlx4_proc_priv_uninit(dev);
+ return -rte_errno;
+ }
+ }
+#endif
mp_init_msg(dev, &mp_res, param->type);
res->result = 0;
ret = rte_mp_reply(&mp_res, peer);
struct rte_mp_reply mp_rep;
struct mlx4_mp_param *res __rte_unused;
struct timespec ts = {.tv_sec = MLX4_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
+ struct mlx4_priv *priv;
int ret;
int i;
return;
}
mp_init_msg(dev, &mp_req, type);
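+ /* Attach the context command fd to START_RXTX requests so that secondary
+ * processes can remap the Tx UAR registers if their table is stale.
+ */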
+ if (type == MLX4_MP_REQ_START_RXTX) {
+ priv = dev->data->dev_private;
+ mp_req.num_fds = 1;
+ mp_req.fds[0] = priv->ctx->cmd_fd;
+ }
ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
if (ret) {
if (rte_errno != ENOTSUP)
/* mlx4_txq.c */
int mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
+void mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
uint64_t mlx4_get_tx_port_offloads(struct mlx4_priv *priv);
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
} while (i--);
return -rte_errno;
}
+
+void
+mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev)
+{
+ struct mlx4_proc_priv *ppriv =
+ (struct mlx4_proc_priv *)dev->process_private;
+ const size_t page_size = sysconf(_SC_PAGESIZE);
+ void *addr;
+ size_t i;
+
+ if (page_size == (size_t)-1) {
+ ERROR("Failed to get mem page size");
+ return;
+ }
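+ /* Each non-NULL entry points inside a page-sized UAR mapping; unmap the
+ * page it belongs to.
+ */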
+ for (i = 0; i < ppriv->uar_table_sz; i++) {
+ addr = ppriv->uar_table[i];
+ if (addr)
+ munmap(RTE_PTR_ALIGN_FLOOR(addr, page_size), page_size);
+ }
+}
+
#else
int
mlx4_tx_uar_init_secondary(struct rte_eth_dev *dev __rte_unused,
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+void
+mlx4_tx_uar_uninit_secondary(struct rte_eth_dev *dev __rte_unused)
+{
+ MLX4_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
+ ERROR("UAR remap is not supported");
+}
#endif
/**