net/mlx5: implement CQ for Rx using DevX API
[dpdk.git] drivers/net/mlx5/linux/mlx5_os.c
index a1a07b9..a3a5e78 100644
 #include <stdlib.h>
 #include <errno.h>
 #include <net/if.h>
-#include <sys/mman.h>
 #include <linux/rtnetlink.h>
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
 #include <fcntl.h>
 
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
 #include <rte_malloc.h>
 #include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
 #include <rte_alarm.h>
+#include <rte_eal_paging.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
 #include <mlx5_common.h>
 #include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5.h"
+#include "mlx5_common_os.h"
 #include "mlx5_utils.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_autoconf.h"
 #include "mlx5_mr.h"
 #include "mlx5_flow.h"
 #include "rte_pmd_mlx5.h"
+#include "mlx5_verbs.h"
 
 #define MLX5_TAGS_HLIST_ARRAY_SIZE 8192
 
 #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
 #endif
 
-/**
- * Get device name. Given an ibv_device pointer - return a
- * pointer to the corresponding device name.
- *
- * @param[in] dev
- *   Pointer to ibv device.
- *
- * @return
- *   Pointer to device name if dev is valid, NULL otherwise.
- */
-const char *
-mlx5_os_get_dev_device_name(void *dev)
-{
-       if (!dev)
-               return NULL;
-       return ((struct ibv_device *)dev)->name;
-}
-
-/**
- * Get ibv device name. Given an ibv_context pointer - return a
- * pointer to the corresponding device name.
- *
- * @param[in] ctx
- *   Pointer to ibv context.
- *
- * @return
- *   Pointer to device name if ctx is valid, NULL otherwise.
- */
-const char *
-mlx5_os_get_ctx_device_name(void *ctx)
-{
-       if (!ctx)
-               return NULL;
-       return ((struct ibv_context *)ctx)->device->name;
-}
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
 
-/**
- * Get ibv device path name. Given an ibv_context pointer - return a
- * pointer to the corresponding device path name.
- *
- * @param[in] ctx
- *   Pointer to ibv context.
- *
- * @return
- *   Pointer to device path name if ctx is valid, NULL otherwise.
- */
-const char *
-mlx5_os_get_ctx_device_path(void *ctx)
-{
-       if (!ctx)
-               return NULL;
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
-       return ((struct ibv_context *)ctx)->device->ibdev_path;
-}
+/* Process local data for secondary processes. */
+static struct mlx5_local_data mlx5_local_data;
 
 /**
- * Get umem id. Given a pointer to umem object of type
- * 'struct mlx5dv_devx_umem *' - return its id.
+ * Set the completion channel file descriptor to non-blocking mode.
  *
- * @param[in] umem
- *   Pointer to umem object.
+ * @param[in] fd
+ *   The completion channel file descriptor (representing the interrupt)
+ *   to be set as non-blocking.
  *
  * @return
- *   The umem id if umem is valid, 0 otherwise.
+ *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
  */
-uint32_t
-mlx5_os_get_umem_id(void *umem)
+int
+mlx5_os_set_nonblock_channel_fd(int fd)
 {
-       if (!umem)
-               return 0;
-       return ((struct mlx5dv_devx_umem *)umem)->umem_id;
+       int flags;
+
+       flags = fcntl(fd, F_GETFL);
+       return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 }
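/*
 * Illustrative sketch (not part of this patch): the channel fd is switched
 * to O_NONBLOCK so that an event handler can drain it with read() and treat
 * EAGAIN as "no event pending" instead of blocking. The helper name and the
 * raw read() below are assumptions, not the PMD's actual event path.
 */
static void
example_drain_channel(int channel_fd)
{
	char ev[128];
	ssize_t n;

	if (mlx5_os_set_nonblock_channel_fd(channel_fd))
		return;
	for (;;) {
		n = read(channel_fd, ev, sizeof(ev));
		if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
			break; /* Nothing pending, return to the caller. */
		if (n <= 0)
			break; /* Error or end of stream. */
		/* Process/acknowledge the event data here. */
	}
}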
 
 /**
@@ -201,7 +153,7 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
  * Verbs callback to allocate a memory. This function should allocate the space
  * according to the size provided residing inside a huge page.
  * Please note that all allocation must respect the alignment from libmlx5
- * (i.e. currently sysconf(_SC_PAGESIZE)).
+ * (i.e. currently rte_mem_page_size()).
  *
  * @param[in] size
  *   The size in bytes of the memory to allocate.
@@ -216,8 +168,13 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
 {
        struct mlx5_priv *priv = data;
        void *ret;
-       size_t alignment = sysconf(_SC_PAGESIZE);
        unsigned int socket = SOCKET_ID_ANY;
+       size_t alignment = rte_mem_page_size();
+       if (alignment == (size_t)-1) {
+               DRV_LOG(ERR, "Failed to get mem page size");
+               rte_errno = ENOMEM;
+               return NULL;
+       }
 
        if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
                const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
@@ -230,7 +187,7 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
                socket = ctrl->socket;
        }
        MLX5_ASSERT(data != NULL);
-       ret = rte_malloc_socket(__func__, size, alignment, socket);
+       ret = mlx5_malloc(0, size, alignment, socket);
        if (!ret && size)
                rte_errno = ENOMEM;
        return ret;
@@ -248,7 +205,7 @@ static void
 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
 {
        MLX5_ASSERT(data != NULL);
-       rte_free(ptr);
+       mlx5_free(ptr);
 }
 
 /**
@@ -280,7 +237,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
        snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
        sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE);
        if (!sh->tag_table) {
-               DRV_LOG(ERR, "tags with hash creation failed.\n");
+               DRV_LOG(ERR, "tags with hash creation failed.");
                err = ENOMEM;
                goto error;
        }
@@ -418,6 +375,109 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
        mlx5_free_table_hash_list(priv);
 }
 
+/**
+ * Initialize shared data between primary and secondary process.
+ *
+ * A memzone is reserved by the primary process and secondary processes
+ * attach to it.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_shared_data(void)
+{
+       const struct rte_memzone *mz;
+       int ret = 0;
+
+       rte_spinlock_lock(&mlx5_shared_data_lock);
+       if (mlx5_shared_data == NULL) {
+               if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+                       /* Allocate shared memory. */
+                       mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+                                                sizeof(*mlx5_shared_data),
+                                                SOCKET_ID_ANY, 0);
+                       if (mz == NULL) {
+                               DRV_LOG(ERR,
+                                       "Cannot allocate mlx5 shared data");
+                               ret = -rte_errno;
+                               goto error;
+                       }
+                       mlx5_shared_data = mz->addr;
+                       memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data));
+                       rte_spinlock_init(&mlx5_shared_data->lock);
+               } else {
+                       /* Lookup allocated shared memory. */
+                       mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+                       if (mz == NULL) {
+                               DRV_LOG(ERR,
+                                       "Cannot attach mlx5 shared data");
+                               ret = -rte_errno;
+                               goto error;
+                       }
+                       mlx5_shared_data = mz->addr;
+                       memset(&mlx5_local_data, 0, sizeof(mlx5_local_data));
+               }
+       }
+error:
+       rte_spinlock_unlock(&mlx5_shared_data_lock);
+       return ret;
+}
+
+/**
+ * PMD global initialization.
+ *
+ * Independent of any individual device, this function initializes global
+ * per-PMD data structures, distinguishing primary and secondary processes.
+ * Hence, the initialization is performed once per process.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_init_once(void)
+{
+       struct mlx5_shared_data *sd;
+       struct mlx5_local_data *ld = &mlx5_local_data;
+       int ret = 0;
+
+       if (mlx5_init_shared_data())
+               return -rte_errno;
+       sd = mlx5_shared_data;
+       MLX5_ASSERT(sd);
+       rte_spinlock_lock(&sd->lock);
+       switch (rte_eal_process_type()) {
+       case RTE_PROC_PRIMARY:
+               if (sd->init_done)
+                       break;
+               LIST_INIT(&sd->mem_event_cb_list);
+               rte_rwlock_init(&sd->mem_event_rwlock);
+               rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+                                               mlx5_mr_mem_event_cb, NULL);
+               ret = mlx5_mp_init_primary(MLX5_MP_NAME,
+                                          mlx5_mp_os_primary_handle);
+               if (ret)
+                       goto out;
+               sd->init_done = true;
+               break;
+       case RTE_PROC_SECONDARY:
+               if (ld->init_done)
+                       break;
+               ret = mlx5_mp_init_secondary(MLX5_MP_NAME,
+                                            mlx5_mp_os_secondary_handle);
+               if (ret)
+                       goto out;
+               ++sd->secondary_cnt;
+               ld->init_done = true;
+               break;
+       default:
+               break;
+       }
+out:
+       rte_spinlock_unlock(&sd->lock);
+       return ret;
+}
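/*
 * Illustrative sketch (not part of this patch) of how the probe path is
 * expected to call mlx5_init_once(): once per process, before any device is
 * spawned, aborting the probe on failure. The wrapper name below is an
 * assumption for illustration only.
 */
static int
example_probe_prologue(void)
{
	if (mlx5_init_once()) {
		DRV_LOG(ERR, "Unable to init PMD global data: %s",
			strerror(rte_errno));
		return -rte_errno;
	}
	/* Device enumeration and per-port spawning would follow here. */
	return 0;
}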
+
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -563,6 +623,7 @@ err_secondary:
                        strerror(rte_errno));
                goto error;
        }
+       mlx5_malloc_mem_select(config.sys_mem_en);
        sh = mlx5_alloc_shared_dev_ctx(spawn, &config);
        if (!sh)
                return NULL;
@@ -684,9 +745,9 @@ err_secondary:
                        mlx5_glue->port_state_str(port_attr.state),
                        port_attr.state);
        /* Allocate private eth device data. */
-       priv = rte_zmalloc("ethdev private structure",
+       priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
                           sizeof(*priv),
-                          RTE_CACHE_LINE_SIZE);
+                          RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
        if (priv == NULL) {
                DRV_LOG(ERR, "priv allocation failure");
                err = ENOMEM;
@@ -698,12 +759,6 @@ err_secondary:
        priv->mtu = RTE_ETHER_MTU;
        priv->mp_id.port_id = port_id;
        strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN);
-#ifndef RTE_ARCH_64
-       /* Initialize UAR access locks for 32bit implementations. */
-       rte_spinlock_init(&priv->uar_lock_cq);
-       for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
-               rte_spinlock_init(&priv->uar_lock[i]);
-#endif
        /* Some internal functions rely on Netlink sockets, open them now. */
        priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
        priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
@@ -848,8 +903,6 @@ err_secondary:
                (config.hw_vlan_strip ? "" : "not "));
        config.hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
                                 IBV_RAW_PACKET_CAP_SCATTER_FCS);
-       DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
-               (config.hw_fcs_strip ? "" : "not "));
 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
        hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
@@ -947,6 +1000,99 @@ err_secondary:
                }
 #endif
        }
+       if (config.tx_pp) {
+               DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
+                       config.hca_attr.dev_freq_khz);
+               DRV_LOG(DEBUG, "Packet pacing is %ssupported",
+                       config.hca_attr.qos.packet_pacing ? "" : "not ");
+               DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
+                       config.hca_attr.cross_channel ? "" : "not ");
+               DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
+                       config.hca_attr.wqe_index_ignore ? "" : "not ");
+               DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
+                       config.hca_attr.non_wire_sq ? "" : "not ");
+               DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
+                       config.hca_attr.log_max_static_sq_wq ? "" : "not ",
+                       config.hca_attr.log_max_static_sq_wq);
+               DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
+                       config.hca_attr.qos.wqe_rate_pp ? "" : "not ");
+               if (!config.devx) {
+                       DRV_LOG(ERR, "DevX is required for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.qos.packet_pacing) {
+                       DRV_LOG(ERR, "Packet pacing is not supported");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.cross_channel) {
+                       DRV_LOG(ERR, "Cross channel operations are"
+                                    " required for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.wqe_index_ignore) {
+                       DRV_LOG(ERR, "WQE index ignore feature is"
+                                    " required for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.non_wire_sq) {
+                       DRV_LOG(ERR, "Non-wire SQ feature is"
+                                    " required for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.log_max_static_sq_wq) {
+                       DRV_LOG(ERR, "Static WQE SQ feature is"
+                                    " required for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+               if (!config.hca_attr.qos.wqe_rate_pp) {
+                       DRV_LOG(ERR, "WQE rate mode is required"
+                                    " for packet pacing");
+                       err = ENODEV;
+                       goto error;
+               }
+#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+               DRV_LOG(ERR, "DevX does not provide UAR offset,"
+                            " can't create queues for packet pacing");
+               err = ENODEV;
+               goto error;
+#endif
+       }
+       if (config.devx) {
+               uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)];
+
+               err = mlx5_devx_cmd_register_read
+                       (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+                       reg, MLX5_ST_SZ_DW(register_mtutc));
+               if (!err) {
+                       uint32_t ts_mode;
+
+                       /* MTUTC register is read successfully. */
+                       ts_mode = MLX5_GET(register_mtutc, reg,
+                                          time_stamp_mode);
+                       if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
+                               config.rt_timestamp = 1;
+               } else {
+                       /* Kernel does not support register reading. */
+                       if (config.hca_attr.dev_freq_khz ==
+                                                (NS_PER_S / MS_PER_S))
+                               config.rt_timestamp = 1;
+               }
+       }
+       /*
+        * If the HW has a bug combining tunnel packet decapsulation with
+        * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
+        * bit. Then the DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+        */
+       if (config.hca_attr.scatter_fcs_w_decap_disable && config.decap_en)
+               config.hw_fcs_strip = 0;
+       DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+               (config.hw_fcs_strip ? "" : "not "));
        if (config.mprq.enabled && mprq) {
                if (config.mprq.stride_num_n &&
                    (config.mprq.stride_num_n > mprq_max_stride_num_n ||
@@ -1056,14 +1202,13 @@ err_secondary:
        TAILQ_INIT(&priv->flow_meters);
        TAILQ_INIT(&priv->flow_meter_profiles);
        /* Hint libmlx5 to use PMD allocator for data plane resources */
-       struct mlx5dv_ctx_allocators alctr = {
-               .alloc = &mlx5_alloc_verbs_buf,
-               .free = &mlx5_free_verbs_buf,
-               .data = priv,
-       };
        mlx5_glue->dv_set_context_attr(sh->ctx,
-                                      MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
-                                      (void *)((uintptr_t)&alctr));
+                       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+                       (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
+                               .alloc = &mlx5_alloc_verbs_buf,
+                               .free = &mlx5_free_verbs_buf,
+                               .data = priv,
+                       }));
        /* Bring Ethernet device up. */
        DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
                eth_dev->data->port_id);
@@ -1176,7 +1321,7 @@ error:
                        mlx5_flow_id_pool_release(priv->qrss_id_pool);
                if (own_domain_id)
                        claim_zero(rte_eth_switch_domain_free(priv->domain_id));
-               rte_free(priv);
+               mlx5_free(priv);
                if (eth_dev != NULL)
                        eth_dev->data->dev_private = NULL;
        }
@@ -1495,10 +1640,10 @@ mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
         * Now we can determine the maximal
         * amount of devices to be spawned.
         */
-       list = rte_zmalloc("device spawn data",
-                        sizeof(struct mlx5_dev_spawn_data) *
-                        (np ? np : nd),
-                        RTE_CACHE_LINE_SIZE);
+       list = mlx5_malloc(MLX5_MEM_ZERO,
+                          sizeof(struct mlx5_dev_spawn_data) *
+                          (np ? np : nd),
+                          RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
        if (!list) {
                DRV_LOG(ERR, "spawn data array allocation failure");
                rte_errno = ENOMEM;
@@ -1559,6 +1704,8 @@ mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                        if (list[ns].info.port_name == bd)
                                                ns++;
                                        break;
+                               case MLX5_PHYS_PORT_NAME_TYPE_PFHPF:
+                                       /* Fallthrough */
                                case MLX5_PHYS_PORT_NAME_TYPE_PFVF:
                                        if (list[ns].info.pf_num == bd)
                                                ns++;
@@ -1633,8 +1780,8 @@ mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                         */
                                        continue;
                                }
-                               ret = mlx5_get_master_ifname
-                                       (ibv_match[i]->ibdev_path, &ifname);
+                               ret = mlx5_get_ifname_sysfs
+                                       (ibv_match[i]->ibdev_path, ifname);
                                if (!ret)
                                        list[ns].ifindex =
                                                if_nametoindex(ifname);
@@ -1719,6 +1866,7 @@ mlx5_os_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                },
                .dv_esw_en = 1,
                .dv_flow_en = 1,
+               .decap_en = 1,
                .log_hp_size = MLX5_ARG_UNSET,
        };
        /* Device specific configuration. */
@@ -1787,7 +1935,7 @@ exit:
        if (nl_route >= 0)
                close(nl_route);
        if (list)
-               rte_free(list);
+               mlx5_free(list);
        MLX5_ASSERT(ibv_list);
        mlx5_glue->free_device_list(ibv_list);
        return ret;
@@ -1879,6 +2027,9 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
 {
        int dbmap_env;
        int err = 0;
+
+       sh->numa_node = spawn->pci_dev->device.numa_node;
+       pthread_mutex_init(&sh->txpp.mutex, NULL);
        /*
         * Configure environment variable "MLX5_BF_SHUT_UP"
         * before the device creation. The rdma_core library
@@ -1993,6 +2144,177 @@ mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
 #endif
 }
 
+/**
+ * Read statistics by a named counter.
+ *
+ * @param[in] priv
+ *   Pointer to the private device data structure.
+ * @param[in] ctr_name
+ *   Pointer to the name of the statistic counter to read.
+ * @param[out] stat
+ *   Pointer to the read statistic value.
+ * @return
+ *   0 on success and *stat is valid, 1 if failed to read the value and
+ *   rte_errno is set.
+ *
+ */
+int
+mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name,
+                     uint64_t *stat)
+{
+       int fd;
+
+       if (priv->sh) {
+               MKSTR(path, "%s/ports/%d/hw_counters/%s",
+                         priv->sh->ibdev_path,
+                         priv->dev_port,
+                         ctr_name);
+               fd = open(path, O_RDONLY);
+               if (fd != -1) {
+                       char buf[21] = {'\0'};
+                       ssize_t n = read(fd, buf, sizeof(buf));
+
+                       close(fd);
+                       if (n != -1) {
+                               *stat = strtoull(buf, NULL, 10);
+                               return 0;
+                       }
+               }
+       }
+       *stat = 0;
+       return 1;
+}
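/*
 * Usage sketch (not part of this patch): reading a single named counter
 * through mlx5_os_read_dev_stat(). The counter name "out_of_buffer" and the
 * wrapper below are assumptions for illustration; any file under
 * <ibdev_path>/ports/<port>/hw_counters/ is read the same way.
 */
static void
example_read_drop_counter(struct mlx5_priv *priv)
{
	uint64_t drops = 0;

	if (mlx5_os_read_dev_stat(priv, "out_of_buffer", &drops) == 0)
		DRV_LOG(DEBUG, "out_of_buffer: %" PRIu64, drops);
}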
+
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * @param[out] reg_mr_cb
+ *   Pointer to the reg_mr function.
+ * @param[out] dereg_mr_cb
+ *   Pointer to the dereg_mr function.
+ *
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
+                     mlx5_dereg_mr_t *dereg_mr_cb)
+{
+       *reg_mr_cb = mlx5_verbs_ops.reg_mr;
+       *dereg_mr_cb = mlx5_verbs_ops.dereg_mr;
+}
+
+/**
+ * Remove a MAC address from the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param index
+ *   MAC address index.
+ */
+void
+mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const int vf = priv->config.vf;
+
+       if (vf)
+               mlx5_nl_mac_addr_remove(priv->nl_socket_route,
+                                       mlx5_ifindex(dev), priv->mac_own,
+                                       &dev->data->mac_addrs[index], index);
+}
+
+/**
+ * Add a MAC address to the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param mac
+ *   MAC address to register.
+ * @param index
+ *   MAC address index.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise
+ */
+int
+mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
+                    uint32_t index)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+       const int vf = priv->config.vf;
+       int ret = 0;
+
+       if (vf)
+               ret = mlx5_nl_mac_addr_add(priv->nl_socket_route,
+                                          mlx5_ifindex(dev), priv->mac_own,
+                                          mac, index);
+       return ret;
+}
+
+/**
+ * Modify a VF MAC address.
+ *
+ * @param priv
+ *   Pointer to device private data.
+ * @param iface_idx
+ *   Net device interface index.
+ * @param mac_addr
+ *   MAC address to modify into.
+ * @param vf_index
+ *   VF index.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise
+ */
+int
+mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv,
+                          unsigned int iface_idx,
+                          struct rte_ether_addr *mac_addr,
+                          int vf_index)
+{
+       return mlx5_nl_vf_mac_addr_modify
+               (priv->nl_socket_route, iface_idx, mac_addr, vf_index);
+}
+
+/**
+ * Set device promiscuous mode
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param enable
+ *   0 - promiscuous is disabled, otherwise - enabled
+ *
+ * @return
+ *   0 on success, a negative error value otherwise
+ */
+int
+mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       return mlx5_nl_promisc(priv->nl_socket_route,
+                              mlx5_ifindex(dev), !!enable);
+}
+
+/**
+ * Set device allmulticast mode
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param enable
+ *   0 - all multicast is disabled, otherwise - enabled
+ *
+ * @return
+ *   0 on success, a negative error value otherwise
+ */
+int
+mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable)
+{
+       struct mlx5_priv *priv = dev->data->dev_private;
+
+       return mlx5_nl_allmulti(priv->nl_socket_route,
+                               mlx5_ifindex(dev), !!enable);
+}
+
 const struct eth_dev_ops mlx5_os_dev_ops = {
        .dev_configure = mlx5_dev_configure,
        .dev_start = mlx5_dev_start,
@@ -2012,7 +2334,7 @@ const struct eth_dev_ops mlx5_os_dev_ops = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
-       .read_clock = mlx5_read_clock,
+       .read_clock = mlx5_txpp_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,
@@ -2061,6 +2383,7 @@ const struct eth_dev_ops mlx5_os_dev_sec_ops = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_txpp_read_clock,
        .rx_descriptor_status = mlx5_rx_descriptor_status,
        .tx_descriptor_status = mlx5_tx_descriptor_status,
        .rxq_info_get = mlx5_rxq_info_get,
@@ -2091,6 +2414,7 @@ const struct eth_dev_ops mlx5_os_dev_ops_isolate = {
        .xstats_get_names = mlx5_xstats_get_names,
        .fw_version_get = mlx5_fw_version_get,
        .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_txpp_read_clock,
        .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
        .vlan_filter_set = mlx5_vlan_filter_set,
        .rx_queue_setup = mlx5_rx_queue_setup,