diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 69344f6bbd..d34f3cc040 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -106,6 +106,7 @@ struct rxq_zip {
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
 	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
 	unsigned int crc_present:1; /* CRC must be subtracted. */
 	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
@@ -114,7 +115,7 @@ struct mlx5_rxq_data {
 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
 	unsigned int mark:1; /* Marked flow available on the queue. */
 	unsigned int pending_err:1; /* CQE error needs to be handled. */
-	unsigned int :15; /* Remaining bits. */
+	unsigned int :14; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
@@ -147,6 +148,8 @@ struct mlx5_rxq_ibv {
 
 /* RX queue control descriptor. */
 struct mlx5_rxq_ctrl {
+	LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
 	struct priv *priv; /* Back pointer to private data. */
 	struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
 	struct mlx5_rxq_data rxq; /* Data path structure. */
@@ -154,103 +157,24 @@ struct mlx5_rxq_ctrl {
 	unsigned int irq:1; /* Whether IRQ is enabled. */
 };
 
-/* Hash RX queue types. */
-enum hash_rxq_type {
-	HASH_RXQ_TCPV4,
-	HASH_RXQ_UDPV4,
-	HASH_RXQ_IPV4,
-	HASH_RXQ_TCPV6,
-	HASH_RXQ_UDPV6,
-	HASH_RXQ_IPV6,
-	HASH_RXQ_ETH,
-};
-
-/* Flow structure with Ethernet specification. It is packed to prevent padding
- * between attr and spec as this layout is expected by libibverbs. */
-struct flow_attr_spec_eth {
-	struct ibv_flow_attr attr;
-	struct ibv_flow_spec_eth spec;
-} __attribute__((packed));
-
-/* Define a struct flow_attr_spec_eth object as an array of at least
- * "size" bytes. Room after the first index is normally used to store
- * extra flow specifications. */
-#define FLOW_ATTR_SPEC_ETH(name, size) \
-	struct flow_attr_spec_eth name \
-		[((size) / sizeof(struct flow_attr_spec_eth)) + \
-		 !!((size) % sizeof(struct flow_attr_spec_eth))]
-
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
-	uint64_t hash_fields; /* Fields that participate in the hash. */
-	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
-	unsigned int flow_priority; /* Flow priority to use. */
-	union {
-		struct {
-			enum ibv_flow_spec_type type;
-			uint16_t size;
-		} hdr;
-		struct ibv_flow_spec_tcp_udp tcp_udp;
-		struct ibv_flow_spec_ipv4 ipv4;
-		struct ibv_flow_spec_ipv6 ipv6;
-		struct ibv_flow_spec_eth eth;
-	} flow_spec; /* Flow specification template. */
-	const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
-};
-
-/* Initialization data for indirection table. */
-struct ind_table_init {
-	unsigned int max_size; /* Maximum number of WQs. */
-	/* Hash RX queues using this table. */
-	unsigned int hash_types;
-	unsigned int hash_types_n;
-};
-
-/* Initialization data for special flows. */
-struct special_flow_init {
-	uint8_t dst_mac_val[6];
-	uint8_t dst_mac_mask[6];
-	unsigned int hash_types;
-	unsigned int per_vlan:1;
-};
-
-enum hash_rxq_flow_type {
-	HASH_RXQ_FLOW_TYPE_PROMISC,
-	HASH_RXQ_FLOW_TYPE_ALLMULTI,
-	HASH_RXQ_FLOW_TYPE_BROADCAST,
-	HASH_RXQ_FLOW_TYPE_IPV6MULTI,
-	HASH_RXQ_FLOW_TYPE_MAC,
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+	uint16_t queues_n; /**< Number of queues in the list. */
+	uint16_t queues[]; /**< Queue list. */
 };
 
-#ifndef NDEBUG
-static inline const char *
-hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
-{
-	switch (flow_type) {
-	case HASH_RXQ_FLOW_TYPE_PROMISC:
-		return "promiscuous";
-	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
-		return "allmulticast";
-	case HASH_RXQ_FLOW_TYPE_BROADCAST:
-		return "broadcast";
-	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-		return "IPv6 multicast";
-	case HASH_RXQ_FLOW_TYPE_MAC:
-		return "MAC";
-	}
-	return NULL;
-}
-#endif /* NDEBUG */
-
-struct hash_rxq {
-	struct priv *priv; /* Back pointer to private data. */
-	struct ibv_qp *qp; /* Hash RX QP. */
-	enum hash_rxq_type type; /* Hash RX queue type. */
-	/* MAC flow steering rules, one per VLAN ID. */
-	struct ibv_flow *mac_flow
-		[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
-	struct ibv_flow *special_flow
-		[MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+	LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+	struct ibv_qp *qp; /* Verbs queue pair. */
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint8_t rss_key_len; /* Hash key length in bytes. */
+	uint8_t rss_key[]; /* Hash key. */
 };
 
 /* TX queue descriptor. */
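
The two structures added above replace the driver's static hash Rx queue
tables with reference-counted objects kept on per-device lists. What follows
is a minimal sketch of the get/release life cycle that the refcnt and
LIST_ENTRY() pair implies, written with C11 atomics and <sys/queue.h> rather
than the DPDK types; the obj_* names are invented for illustration and are
not part of this patch.

#include <stdatomic.h>
#include <stdlib.h>
#include <sys/queue.h>

struct obj {
	LIST_ENTRY(obj) next; /* Linkage on the owner's list. */
	atomic_int refcnt;    /* Number of current users. */
	int id;               /* Stand-in for the real payload. */
};

LIST_HEAD(obj_list, obj);

/* Look up an existing object; take a reference on a hit. */
static struct obj *
obj_get(struct obj_list *list, int id)
{
	struct obj *o;

	LIST_FOREACH(o, list, next)
		if (o->id == id) {
			atomic_fetch_add(&o->refcnt, 1);
			return o;
		}
	return NULL;
}

/* Drop a reference; unlink and free on the last release. */
static int
obj_release(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		LIST_REMOVE(o, next);
		free(o);
		return 0;
	}
	return 1; /* Still referenced elsewhere. */
}

The mlx5_priv_hrxq_get()/mlx5_priv_hrxq_release() and
mlx5_priv_ind_table_ibv_get()/mlx5_priv_ind_table_ibv_release() prototypes
declared further down suggest the same pattern for struct mlx5_hrxq and
struct mlx5_ind_table_ibv, with rte_atomic32_t in the role of the C11 atomic.
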
@@ -310,18 +234,9 @@ struct mlx5_txq_ctrl {
 
 /* mlx5_rxq.c */
 
-extern const struct hash_rxq_init hash_rxq_init[];
-extern const unsigned int hash_rxq_init_n;
-
 extern uint8_t rss_hash_default_key[];
 extern const size_t rss_hash_default_key_len;
 
-size_t priv_flow_attr(struct priv *, struct ibv_flow_attr *,
-		      size_t, enum hash_rxq_type);
-int priv_create_hash_rxqs(struct priv *);
-void priv_destroy_hash_rxqs(struct priv *);
-int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
-int priv_rehash_flows(struct priv *);
 void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 			const struct rte_eth_rxconf *, struct rte_mempool *);
@@ -335,6 +250,28 @@ struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
 int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
 int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
 int mlx5_priv_rxq_ibv_verify(struct priv *);
+struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
+					uint16_t, unsigned int,
+					struct rte_mempool *);
+struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
+int mlx5_priv_rxq_release(struct priv *, uint16_t);
+int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
+int mlx5_priv_rxq_verify(struct priv *);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
+						       uint16_t [],
+						       uint16_t);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
+						       uint16_t [],
+						       uint16_t);
+int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
+int mlx5_priv_ind_table_ibv_verify(struct priv *);
+struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
+				     uint64_t, uint16_t [], uint16_t);
+struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
+				     uint64_t, uint16_t [], uint16_t);
+int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
+int mlx5_priv_hrxq_ibv_verify(struct priv *);
 
 /* mlx5_txq.c */
 
@@ -568,7 +505,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
 	txq->cq_ci = cq_ci;
 	txq->elts_tail = elts_tail;
 	/* Update the consumer index. */
-	rte_wmb();
+	rte_compiler_barrier();
 	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
 }
 
@@ -641,15 +578,18 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 }
 
 /**
- * Ring TX queue doorbell.
+ * Ring TX queue doorbell and flush the update if requested.
  *
  * @param txq
  *   Pointer to TX queue structure.
  * @param wqe
 *   Pointer to the last WQE posted in the NIC.
+ * @param cond
+ *   Request for write memory barrier after BlueFlame update.
  */
 static __rte_always_inline void
-mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
+mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
+		       int cond)
 {
 	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);
@@ -659,6 +599,22 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
 	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
 	*dst = *src;
+	if (cond)
+		rte_wmb();
+}
+
+/**
+ * Ring TX queue doorbell and flush the update by write memory barrier.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the last WQE posted in the NIC.
+ */
+static __rte_always_inline void
+mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
+{
+	mlx5_tx_dbrec_cond_wmb(txq, wqe, 1);
 }
 
 #endif /* RTE_PMD_MLX5_RXTX_H_ */
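
The last two hunks split doorbell ringing in two: mlx5_tx_dbrec_cond_wmb()
makes the trailing write memory barrier optional via its cond argument, and
mlx5_tx_dbrec() becomes a thin wrapper that always flushes (cond = 1). As a
rough model of that sequence, with C11 fences standing in for the DPDK
barriers and all names invented for illustration (none are taken from the
driver):

#include <stdatomic.h>
#include <stdint.h>

/* db_rec models the doorbell record, bf_reg the BlueFlame register. */
static void
ring_doorbell(volatile uint32_t *db_rec, uint32_t wqe_ci,
	      volatile uint64_t *bf_reg, const volatile uint64_t *wqe,
	      int flush)
{
	*db_rec = wqe_ci; /* Publish the new producer index. */
	/* Keep the DB record write ordered before the BF copy
	 * (the rte_wmb() retained in the hunk above). */
	atomic_thread_fence(memory_order_release);
	*bf_reg = *wqe; /* Copy the first 8 bytes of the WQE. */
	if (flush) /* cond != 0: pay for the extra barrier only when asked. */
		atomic_thread_fence(memory_order_release);
}

Callers that post several WQEs back to back can thus defer the final barrier
to the last doorbell of a burst, which appears to be the point of adding the
cond parameter.
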