git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ethdev: add a missing sanity check for Tx queue setup
[dpdk.git]
/
lib
/
librte_reorder
/
rte_reorder.c
diff --git a/lib/librte_reorder/rte_reorder.c b/lib/librte_reorder/rte_reorder.c
index 42d2a47..010dff6 100644 (file)
--- a/lib/librte_reorder/rte_reorder.c
+++ b/lib/librte_reorder/rte_reorder.c
@@ -39,13 +39,17 @@
 #include <rte_memzone.h>
 #include <rte_eal_memconfig.h>
 #include <rte_errno.h>
-#include <rte_tailq.h>
 #include <rte_malloc.h>
 
 #include "rte_reorder.h"
 
 TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);
 
+static struct rte_tailq_elem rte_reorder_tailq = {
+	.name = "RTE_REORDER",
+};
+EAL_REGISTER_TAILQ(rte_reorder_tailq)
+
 #define NO_FLAGS 0
 #define RTE_REORDER_PREFIX "RO_"
 #define RTE_REORDER_NAMESIZE 32
@@ -69,6 +73,7 @@ struct rte_reorder_buffer {
 	unsigned int memsize; /**< memory area size of reorder buffer */
 	struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
 	struct cir_buffer order_buf; /**< buffer used to reorder entries */
+	int is_initialized;
 } __rte_cache_aligned;
 
 static void
@@ -127,12 +132,7 @@ rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
 	const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 					(2 * size * sizeof(struct rte_mbuf *));
 
-	/* check that we have an initialised tail queue */
-	reorder_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_REORDER, rte_reorder_list);
-	if (!reorder_list) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return NULL;
-	}
+	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);
 
 	/* Check user arguments. */
 	if (!rte_is_power_of_2(size)) {
@@ -220,12 +220,7 @@ rte_reorder_free(struct rte_reorder_buffer *b)
 	if (b == NULL)
 		return;
 
-	/* check that we have an initialised tail queue */
-	reorder_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_REORDER, rte_reorder_list);
-	if (!reorder_list) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return;
-	}
+	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);
 
 	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
 
@@ -256,12 +251,7 @@ rte_reorder_find_existing(const char *name)
 	struct rte_tailq_entry *te;
 	struct rte_reorder_list *reorder_list;
 
-	/* check that we have an initialised tail queue */
-	reorder_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_REORDER, rte_reorder_list);
-	if (!reorder_list) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return NULL;
-	}
+	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);
 
 	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
 	TAILQ_FOREACH(te, reorder_list, next) {
@@ -336,6 +326,11 @@ rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
 	uint32_t offset, position;
 	struct cir_buffer *order_buf = &b->order_buf;
 
+	if (!b->is_initialized) {
+		b->min_seqn = mbuf->seqn;
+		b->is_initialized = 1;
+	}
+
 	/*
 	 * calculate the offset from the head pointer we need to go.
 	 * The subtraction takes care of the sequence number wrapping.