/*-
* BSD LICENSE
*
- * Copyright 2012-2017 6WIND S.A.
- * Copyright 2012-2017 Mellanox.
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#ifndef RTE_PMD_MLX4_H_
#define RTE_PMD_MLX4_H_
-#include <stddef.h>
#include <stdint.h>
#include <limits.h>
/* Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64
/* Maximum number of Scatter/Gather Elements per Work Request. */
-#ifndef MLX4_PMD_SGE_WR_N
#define MLX4_PMD_SGE_WR_N 4
-#endif
/* Maximum size for inline data. */
-#ifndef MLX4_PMD_MAX_INLINE
#define MLX4_PMD_MAX_INLINE 0
-#endif
/*
 * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
 * from which buffers are to be transmitted must be associated to memory
 * regions (MRs). This is a slow operation that must be cached.
 *
 * This value is always 1 for RX queues.
 */
#ifndef MLX4_PMD_TX_MP_CACHE
#define MLX4_PMD_TX_MP_CACHE 8
#endif
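/*
 * Illustrative sketch (not part of this header): a lookup over a small
 * MP-to-MR cache of MLX4_PMD_TX_MP_CACHE entries. The entry layout and the
 * miss handling below are assumptions made for the example; the real lookup
 * and registration code lives in the PMD sources.
 */
struct example_mp2mr_entry {
	const struct rte_mempool *mp; /* Cached Memory Pool. */
	struct ibv_mr *mr; /* Memory Region registered for mp. */
	uint32_t lkey; /* Cached mr->lkey, placed in send WRs. */
};

static inline uint32_t
example_mp2mr_lookup(struct example_mp2mr_entry (*cache)[MLX4_PMD_TX_MP_CACHE],
		     const struct rte_mempool *mp)
{
	unsigned int i;

	for (i = 0; i != MLX4_PMD_TX_MP_CACHE; ++i) {
		if ((*cache)[i].mp == NULL)
			break; /* Unused entries are kept at the end. */
		if ((*cache)[i].mp == mp)
			return (*cache)[i].lkey; /* Cache hit. */
	}
	/*
	 * Cache miss: an implementation would register a new MR for mp here
	 * (the slow operation the cache exists to avoid) and evict an old
	 * entry when the table is full.
	 */
	return (uint32_t)-1;
}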
-/*
- * If defined, only use software counters. The PMD will never ask the hardware
- * for these, and many of them won't be available.
- */
-#ifndef MLX4_PMD_SOFT_COUNTERS
-#define MLX4_PMD_SOFT_COUNTERS 1
-#endif
-
/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000
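/*
 * Illustrative sketch (not part of this header): the timeout above is meant
 * for the EAL alarm API, e.g. to defer link status processing outside of an
 * interrupt callback. Both function names are made up for the example.
 */
#include <rte_alarm.h>

static void
example_link_alarm_cb(void *arg)
{
	(void)arg; /* A real handler would re-read the link status here. */
}

static inline int
example_schedule_link_alarm(void *dev)
{
	/* Returns 0 on success, a negative value otherwise. */
	return rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
				 example_link_alarm_cb, dev);
}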
+/* Port parameter. */
+#define MLX4_PMD_PORT_KVARG "port"
+
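/*
 * Illustrative sketch (not part of this header): how the "port" device
 * argument above could be parsed with librte_kvargs at probe time. The
 * example_conf structure and both helpers are made up for the example; the
 * actual parsing is done in the PMD sources.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <rte_kvargs.h>

struct example_conf {
	uint32_t ports; /* Bitmask of physical ports to enable. */
};

static int
example_port_handler(const char *key, const char *val, void *out)
{
	struct example_conf *conf = out;
	char *end = NULL;
	unsigned long tmp = strtoul(val, &end, 0);

	if (end == val || *end != '\0' || tmp >= 32)
		return -EINVAL; /* Not a valid physical port number. */
	if (strcmp(key, MLX4_PMD_PORT_KVARG) == 0)
		conf->ports |= 1u << tmp;
	return 0;
}

static int
example_parse_devargs(const char *args, struct example_conf *conf)
{
	const char *keys[] = { MLX4_PMD_PORT_KVARG, NULL };
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, keys);
	int ret;

	if (kvlist == NULL)
		return -EINVAL;
	ret = rte_kvargs_process(kvlist, MLX4_PMD_PORT_KVARG,
				 example_port_handler, conf);
	rte_kvargs_free(kvlist);
	return ret;
}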
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
/* Number of elements in array. */
#define elemof(a) (sizeof(a) / sizeof((a)[0]))
-/* Cast pointer p to structure member m to its parent structure of type t. */
-#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m)))
-
-/* Branch prediction helpers. */
-#ifndef likely
-#define likely(c) __builtin_expect(!!(c), 1)
-#endif
-#ifndef unlikely
-#define unlikely(c) __builtin_expect(!!(c), 0)
-#endif
-
/* Debugging */
#ifndef NDEBUG
#include <stdio.h>
#define DEBUG_(...) \
	(errno = ((int []){ \
		*(volatile int *)&errno, \
		(DEBUG__(__VA_ARGS__), 0) \
	})[0])
#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
+#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
+#define claim_zero(...) \
+ (void)(((__VA_ARGS__) == 0) || \
+ DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
+#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#define claim_positive(...) assert((__VA_ARGS__) >= 0)
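/*
 * Typical use (sketch): wrap Verbs cleanup calls whose return value only
 * matters for debugging, e.g. claim_zero(ibv_destroy_qp(qp)). With
 * MLX4_PMD_DEBUG_BROKEN_VERBS defined, a nonzero return is logged through
 * DEBUG() and ignored instead of triggering assert().
 */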
#else /* NDEBUG */
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
/* RX queue descriptor. */
struct rxq {
+ LIST_ENTRY(rxq) next; /* Used by parent queue only. */
struct priv *priv; /* Back pointer to private data. */
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_mr *mr; /* Memory Region (for mp). */
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ struct ibv_comp_channel *channel; /* Rx completion channel (Rx interrupts). */
/*
* Each VLAN ID requires a separate flow steering rule.
*/
struct mlx4_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
+ struct {
+ uint16_t queues_n; /* Number of entries in queues[]. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /* Indices of RSS child queues. */
+ } rss;
};
/* TX element. */
struct txq_elt {
	struct rte_mbuf *buf;
};

struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};
struct ibv_qp *qp; /* Queue Pair. */
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX4_PMD_MAX_INLINE > 0
uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
-#endif
unsigned int elts_n; /* (*elts)[] length. */
struct txq_elt (*elts)[]; /* TX elements. */
unsigned int elts_head; /* Current index in (*elts)[]. */
unsigned int rss:1; /* RSS is enabled. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
-#ifdef INLINE_RECV
+ unsigned int isolated:1; /* Toggle isolated mode. */
unsigned int inl_recv_size; /* Inline receive size. */
-#endif
unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
/* RX/TX queues. */
- struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct rxq *(*rxqs)[]; /* RX queues. */
struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle_dev; /* Device interrupt handler. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
+ struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
LIST_HEAD(mlx4_flows, rte_flow) flows;
+ struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+ LIST_HEAD(mlx4_parents, rxq) parents; /* RSS parent Rx queues. */
rte_spinlock_t lock; /* Lock for control functions. */
};
void priv_lock(struct priv *priv);
void priv_unlock(struct priv *priv);
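/*
 * Illustrative sketch (not part of this header): the usual control-path
 * pattern around the lock in struct priv. The callback below is made up;
 * real users are the ethdev callbacks in the PMD sources.
 */
static inline int
example_ctrl_op(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv); /* Serialize against other control operations. */
	ret = 0; /* ... reconfigure queues, flows or interrupts here ... */
	priv_unlock(priv);
	return ret;
}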
+int
+rxq_create_qp(struct rxq *rxq,
+ uint16_t desc,
+ int inactive,
+ int children_n,
+ struct rxq *rxq_parent);
+
+void
+rxq_parent_cleanup(struct rxq *parent);
+
+struct rxq *
+priv_parent_create(struct priv *priv,
+ uint16_t queues[],
+ uint16_t children_n);
+
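/*
 * Illustrative sketch (not part of this header): one way the prototypes
 * above might be combined to group every configured Rx queue under a single
 * RSS parent. The helper name and error handling are made up; the actual
 * call sites are in the PMD sources, which presumably also link the parent
 * into the priv->parents list declared earlier.
 */
static inline struct rxq *
example_default_parent(struct priv *priv)
{
	uint16_t queues[RTE_MAX_QUEUES_PER_PORT];
	uint16_t i;

	for (i = 0; i != priv->rxqs_n && i != RTE_MAX_QUEUES_PER_PORT; ++i)
		queues[i] = i; /* Child queue indices, in configured order. */
	return priv_parent_create(priv, queues, i);
}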
#endif /* RTE_PMD_MLX4_H_ */