diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c0e1adf140..2676634956 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -28,6 +28,7 @@
 #include <rte_atomic.h>
 #include <rte_spinlock.h>
 #include <rte_io.h>
+#include <rte_bus_pci.h>
 
 #include "mlx5_utils.h"
 #include "mlx5.h"
@@ -40,7 +41,6 @@
 #define MLX5_FLOW_TUNNEL 5
 
 struct mlx5_rxq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t ipackets; /**< Total of successfully received packets. */
 	uint64_t ibytes; /**< Total of successfully received bytes. */
@@ -50,7 +50,6 @@ struct mlx5_rxq_stats {
 };
 
 struct mlx5_txq_stats {
-	unsigned int idx; /**< Mapping index. */
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	uint64_t opackets; /**< Total of successfully sent packets. */
 	uint64_t obytes; /**< Total of successfully sent bytes. */
@@ -115,6 +114,7 @@ struct mlx5_rxq_data {
 	struct rte_mempool *mp;
 	struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
 	struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
 	uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
@@ -140,14 +140,13 @@ struct mlx5_rxq_ibv {
 
 /* RX queue control descriptor. */
 struct mlx5_rxq_ctrl {
+	struct mlx5_rxq_data rxq; /* Data path structure. */
 	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_rxq_data rxq; /* Data path structure. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	uint16_t idx; /* Queue index. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 };
@@ -167,6 +166,9 @@ struct mlx5_hrxq {
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
 	struct ibv_qp *qp; /* Verbs queue pair. */
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	void *action; /* DV QP action pointer. */
+#endif
 	uint64_t hash_fields; /* Verbs Hash fields. */
 	uint32_t rss_key_len; /* Hash key length in bytes. */
 	uint8_t rss_key[]; /* Hash key. */
@@ -202,8 +204,9 @@ struct mlx5_txq_data {
 	volatile void *wqes; /* Work queue (use volatile to write into). */
 	volatile uint32_t *qp_db; /* Work queue doorbell. */
 	volatile uint32_t *cq_db; /* Completion queue doorbell. */
-	volatile void *bf_reg; /* Blueflame register remapped. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
+	uint16_t port_id; /* Port ID of device. */
+	uint16_t idx; /* Queue index. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
 #ifndef RTE_ARCH_64
 	rte_spinlock_t *uar_lock;
@@ -222,6 +225,7 @@ struct mlx5_txq_ibv {
 
 /* TX queue control descriptor. */
 struct mlx5_txq_ctrl {
+	struct mlx5_txq_data txq; /* Data path structure. */
 	LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	unsigned int socket; /* CPU socket ID for allocations. */
@@ -229,12 +233,13 @@ struct mlx5_txq_ctrl {
 	unsigned int max_tso_header; /* Max TSO header size. */
 	struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
-	struct mlx5_txq_data txq; /* Data path structure. */
 	off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
-	volatile void *bf_reg_orig; /* Blueflame register from verbs. */
-	uint16_t idx; /* Queue index. */
+	void *bf_reg; /* BlueFlame register from Verbs. */
 };
 
+#define MLX5_TX_BFREG(txq) \
+		(MLX5_PROC_PRIV((txq)->port_id)->uar_table[(txq)->idx])
+
 /* mlx5_rxq.c */
 
 extern uint8_t rss_hash_default_key[];
@@ -302,7 +307,7 @@ uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
 int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			unsigned int socket, const struct rte_eth_txconf *conf);
 void mlx5_tx_queue_release(void *dpdk_txq);
-int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
 struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
 struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
@@ -367,6 +372,10 @@ uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
 uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb);
 uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
			       struct rte_mempool *mp);
+int mlx5_dma_map(struct rte_pci_device *pdev, void *addr, uint64_t iova,
+		 size_t len);
+int mlx5_dma_unmap(struct rte_pci_device *pdev, void *addr, uint64_t iova,
+		   size_t len);
 
 /**
  * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
@@ -568,6 +577,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
 	}
 #endif /* NDEBUG */
 	++cq_ci;
+	rte_cio_rmb();
 	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
 	ctrl = (volatile struct mlx5_wqe_ctrl *)
 		tx_mlx5_wqe(txq, txq->wqe_pi);
@@ -700,7 +710,7 @@ static __rte_always_inline void
 mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		       int cond)
 {
-	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+	uint64_t *dst = MLX5_TX_BFREG(txq);
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);
 
 	rte_cio_wmb();
@@ -774,7 +784,7 @@ txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
 	 * in if any of SWP offsets is set. Therefore, all of the L3 offsets
 	 * should be set regardless of HW offload.
 	 */
-	off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+	off = buf->outer_l2_len + (vlan ? sizeof(struct rte_vlan_hdr) : 0);
 	offsets[1] = off >> 1; /* Outer L3 offset. */
 	off += buf->outer_l3_len;
 	if (tunnel == PKT_TX_TUNNEL_UDP)
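---

Notes on the change:

The MLX5_TX_BFREG() macro added above resolves the BlueFlame doorbell
register through per-process private data (MLX5_PROC_PRIV) indexed by
port and queue, instead of a bf_reg pointer cached in the shared Tx
queue structure. UAR pages are mapped at different virtual addresses in
primary and secondary processes, so a pointer stored in shared memory is
only valid in the process that created it. A minimal, self-contained
sketch of that pattern follows; the proc_priv/uar_table layout and the
TX_BFREG name are illustrative stand-ins, not the verbatim mlx5
definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define QUEUES 4

	struct txq_data {                /* shared between processes */
		uint16_t port_id;
		uint16_t idx;
	};

	struct proc_priv {               /* private to each process */
		uint64_t *uar_table[QUEUES]; /* per-queue doorbell addresses */
	};

	/* Hypothetical stand-in for MLX5_PROC_PRIV(port_id). */
	static struct proc_priv *proc_priv_by_port[2];

	#define TX_BFREG(txq) \
		(proc_priv_by_port[(txq)->port_id]->uar_table[(txq)->idx])

	int main(void)
	{
		static uint64_t fake_uar[QUEUES]; /* stand-in for mmap'ed UAR */
		static struct proc_priv me;
		struct txq_data txq = { .port_id = 0, .idx = 2 };

		for (int i = 0; i < QUEUES; i++)
			me.uar_table[i] = &fake_uar[i];
		proc_priv_by_port[0] = &me;

		*TX_BFREG(&txq) = 0x1234; /* "ring the doorbell" of queue 2 */
		printf("queue %u register now holds %#llx\n", txq.idx,
		       (unsigned long long)fake_uar[txq.idx]);
		return 0;
	}

This is also why bf_reg moves out of struct mlx5_txq_data into the
control structure, and why port_id/idx move into the data path
structure: the hot path keeps only process-independent state.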
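The rte_cio_rmb() added in mlx5_tx_complete() orders two loads from the
completion queue entry: the CQE must be observed valid (the ownership
check against cq_ci) before wqe_counter is loaded, otherwise the CPU may
read a stale counter left over from the previous CQE generation. A
hedged sketch of the same load/load ordering, with a generic acquire
fence standing in for rte_cio_rmb() and a deliberately simplified CQE
layout:

	#include <stdint.h>

	struct cqe {
		uint16_t wqe_counter; /* written by HW before op_own */
		uint8_t  op_own;      /* ownership bit, written last */
	};

	/* Stand-in for rte_cio_rmb(): forbid load/load reordering. */
	static inline void cio_rmb(void)
	{
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	}

	uint16_t poll_one(volatile struct cqe *c, uint8_t owner)
	{
		while ((c->op_own & 1) != owner)
			;              /* wait until HW hands over the CQE */
		cio_rmb();             /* keep the next load after the check */
		return c->wqe_counter; /* now belongs to this CQE */
	}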
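The new mlx5_dma_map()/mlx5_dma_unmap() prototypes take a struct
rte_pci_device because they implement the PCI bus dma_map/dma_unmap
driver callbacks for externally allocated memory (hence the added
rte_bus_pci.h include); applications reach them through the generic
device API rather than calling the driver directly. A usage sketch,
assuming the rte_extmem_register()/rte_dev_dma_map() APIs of the same
DPDK release; the buffer, IOVA, and page size arguments are made up for
illustration:

	#include <rte_dev.h>
	#include <rte_memory.h>

	/* Register an externally allocated buffer with DPDK, then map it
	 * for DMA on one device; on an mlx5 port the map call dispatches
	 * to mlx5_dma_map(), which creates a Memory Region for the range.
	 */
	static int
	extmem_map(struct rte_device *dev, void *buf, uint64_t iova,
		   size_t len, size_t pgsz)
	{
		int ret = rte_extmem_register(buf, len, NULL, 0, pgsz);

		if (ret != 0)
			return ret;
		return rte_dev_dma_map(dev, buf, iova, len);
	}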