/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_traffic_disable(dev);
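+ /* Flush flow rules that may still be present. */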
+ mlx5_flow_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma);
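+ /* Release the libmnl socket used for switch offload flow rules, if any. */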
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
rte_memseg_walk(find_lower_va_bound, &addr);
/* keep distance to hugepages to minimize potential conflicts. */
- addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+ addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
/* anonymous mmap, no real memory consumption. */
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
priv->device_attr = attr;
priv->pd = pd;
priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32-bit implementations. */
+ rte_spinlock_init(&priv->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&priv->uar_lock[i]);
+#endif
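+ /*
+  * On 32-bit targets the 64-bit UAR doorbell write cannot be issued as a
+  * single store, so the locks above serialize the two 32-bit halves.
+  * Roughly (illustration only, not the actual helper):
+  *
+  *	rte_spinlock_lock(lock);
+  *	rte_write32((uint32_t)val, addr);
+  *	rte_write32((uint32_t)(val >> 32), (uint8_t *)addr + 4);
+  *	rte_spinlock_unlock(lock);
+  */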
/* Some internal functions rely on Netlink sockets, open them now. */
priv->nl_socket_rdma = mlx5_nl_init(0, NETLINK_RDMA);
priv->nl_socket_route = mlx5_nl_init(RTMGRP_LINK, NETLINK_ROUTE);
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
if (vf && config.vf_nl_en)
mlx5_nl_mac_addr_sync(eth_dev);
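+ /*
+  * Open a libmnl (Netlink) socket for flow rules offloaded to the
+  * switch; failure is not fatal, only switch offloads are lost.
+  */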
+ priv->mnl_socket = mlx5_nl_flow_socket_create();
+ if (!priv->mnl_socket) {
+ err = -rte_errno;
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will not be"
+ " supported: cannot open libmnl socket: %s",
+ strerror(rte_errno));
+ } else {
+ struct rte_flow_error error;
+ unsigned int ifindex = mlx5_ifindex(eth_dev);
+
+ if (!ifindex) {
+ err = -rte_errno;
+ error.message =
+ "cannot retrieve network interface index";
+ } else {
+ err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+ &error);
+ }
+ if (err) {
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will"
+ " not be supported: %s: %s",
+ error.message, strerror(rte_errno));
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ priv->mnl_socket = NULL;
+ }
+ }
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
/* Hint libmlx5 to use PMD allocator for data plane resources */
close(priv->nl_socket_route);
if (priv->nl_socket_rdma >= 0)
close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
rte_free(priv);
* Netlink calls assuming kernel drivers are recent enough to
* support them.
*
- * In the event of identification failure through Netlink, either:
+ * In the event of identification failure through Netlink, try again
+ * through sysfs, then either:
*
* 1. No device matches (n == 0), complain and bail out.
* 2. A single IB device matches (n == 1) and is not a representor,
if (nl_route < 0 ||
!list[i].ifindex ||
mlx5_nl_switch_info(nl_route, list[i].ifindex,
- &list[i].info)) {
+ &list[i].info) ||
+ ((!list[i].info.representor && !list[i].info.master) &&
+ mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
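+ /* Neither Netlink nor sysfs could provide switch information. */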
list[i].ifindex = 0;
memset(&list[i].info, 0, sizeof(list[i].info));
continue;