4 * Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of EZchip Semiconductor nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <rte_eal_memconfig.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cycles.h>
42 #include <gxio/mpipe.h>
44 /* mPIPE GBE hardware register definitions. */
45 #define MPIPE_GBE_NETWORK_CONFIGURATION 0x8008
46 #define MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT 4
47 #define MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT 6
48 #define MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT 7
50 /* mPIPE XAUI hardware register definitions. */
51 #define MPIPE_XAUI_RECEIVE_CONFIGURATION 0x8020
52 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT 0
53 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT 2
54 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT 3
56 #ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
57 #define PMD_DEBUG_RX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
58 #define PMD_DEBUG_TX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
60 #define PMD_DEBUG_RX(...)
61 #define PMD_DEBUG_TX(...)
64 #define MPIPE_MAX_CHANNELS 128
65 #define MPIPE_TX_MAX_QUEUES 128
66 #define MPIPE_RX_MAX_QUEUES 16
67 #define MPIPE_TX_DESCS 512
68 #define MPIPE_RX_BUCKETS 256
69 #define MPIPE_RX_STACK_SIZE 65536
70 #define MPIPE_RX_IP_ALIGN 2
71 #define MPIPE_BSM_ALIGN 128
73 #define MPIPE_LINK_UPDATE_TIMEOUT 10 /* s */
74 #define MPIPE_LINK_UPDATE_INTERVAL 100000 /* us */
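/*
 * MPIPE_RX_IP_ALIGN is the usual 2-byte pad so that the IP header of a
 * received Ethernet frame lands on a 4-byte boundary; MPIPE_BSM_ALIGN is
 * presumably the alignment the buffer stack manager expects of pushed
 * buffers.  As used in mpipe_link_update() below, the link is polled
 * MPIPE_LINK_UPDATE_TIMEOUT times with MPIPE_LINK_UPDATE_INTERVAL
 * microseconds between polls, i.e. roughly one second in total.
 */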
76 struct mpipe_channel_config {
81 gxio_mpipe_rules_stacks_t stacks;
84 struct mpipe_context {
86 gxio_mpipe_context_t context;
87 struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
90 /* Per-core local data. */
92 int mbuf_push_debt[RTE_MAX_ETHPORTS]; /* Buffer push debt. */
93 } __rte_cache_aligned;
95 #define MPIPE_BUF_DEBT_THRESHOLD 32
96 static __thread struct mpipe_local mpipe_local;
97 static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
98 static int mpipe_instances;
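/*
 * Per-lcore buffer "push debt": instead of pushing a replacement buffer onto
 * the mPIPE buffer stack for every received packet, mpipe_do_recv() defers up
 * to MPIPE_BUF_DEBT_THRESHOLD pushes per port by bumping mbuf_push_debt[].
 * mpipe_do_xmit() later repays the debt, presumably by letting the hardware
 * return transmitted buffers to the stack itself (such mbufs are not parked
 * in tx_comps[] for a later software free).
 */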
100 /* Per-queue statistics. */
101 struct mpipe_queue_stats {
102 uint64_t packets, bytes, errors, nomem;
105 /* Common tx/rx queue fields. */
107 struct mpipe_dev_priv *priv; /* "priv" data of its device. */
108 uint16_t nb_desc; /* Number of descriptors. */
109 uint16_t port_id; /* Device index. */
110 uint16_t stat_idx; /* Queue stats index. */
111 uint8_t queue_idx; /* Queue index. */
112 uint8_t link_status; /* 0 = link down. */
113 struct mpipe_queue_stats stats; /* Stat data for the queue. */
116 /* Transmit queue description. */
117 struct mpipe_tx_queue {
118 struct mpipe_queue q; /* Common stuff. */
121 /* Receive queue description. */
122 struct mpipe_rx_queue {
123 struct mpipe_queue q; /* Common stuff. */
124 gxio_mpipe_iqueue_t iqueue; /* mPIPE iqueue. */
125 gxio_mpipe_idesc_t *next_desc; /* Next idesc to process. */
126 int avail_descs; /* Number of available descs. */
127 void *rx_ring_mem; /* DMA ring memory. */
130 struct mpipe_dev_priv {
131 gxio_mpipe_context_t *context; /* mPIPE context. */
132 gxio_mpipe_link_t link; /* mPIPE link for the device. */
133 gxio_mpipe_equeue_t equeue; /* mPIPE equeue. */
134 unsigned equeue_size; /* mPIPE equeue desc count. */
135 int instance; /* mPIPE instance. */
136 int ering; /* mPIPE eDMA ring. */
137 int stack; /* mPIPE buffer stack. */
138 int channel; /* Device channel. */
139 int port_id; /* DPDK port index. */
140 struct rte_eth_dev *eth_dev; /* DPDK device. */
141 struct rte_mbuf **tx_comps; /* TX completion array. */
142 struct rte_mempool *rx_mpool; /* mpool used by the rx queues. */
143 unsigned rx_offset; /* Receive headroom. */
144 unsigned rx_size_code; /* mPIPE rx buffer size code. */
145 int is_xaui:1, /* Is this an xgbe or gbe? */
146 initialized:1, /* Initialized port? */
147 running:1; /* Running port? */
148 struct ether_addr mac_addr; /* MAC address. */
149 unsigned nb_rx_queues; /* Configured rx queues. */
150 unsigned nb_tx_queues; /* Configured tx queues. */
151 int first_bucket; /* mPIPE bucket start index. */
152 int first_ring; /* mPIPE notif ring start index. */
153 int notif_group; /* mPIPE notif group. */
154 rte_atomic32_t dp_count __rte_cache_aligned; /* DP Entry count. */
155 int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
156 int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
159 #define mpipe_priv(dev) \
160 ((struct mpipe_dev_priv*)(dev)->data->dev_private)
162 #define mpipe_name(priv) \
163 ((priv)->eth_dev->data->name)
165 #define mpipe_rx_queue(priv, n) \
166 ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])
168 #define mpipe_tx_queue(priv, n) \
169 ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])
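/*
 * Typical use of the accessors above inside an eth_dev callback
 * (illustrative sketch only):
 *
 *	struct mpipe_dev_priv *priv = mpipe_priv(dev);
 *	struct mpipe_rx_queue *rxq = mpipe_rx_queue(priv, 0);
 *
 *	RTE_LOG(DEBUG, PMD, "%s: first rx queue %p\n",
 *		mpipe_name(priv), (void *)rxq);
 */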
172 mpipe_xmit_flush(struct mpipe_dev_priv *priv);
175 mpipe_recv_flush(struct mpipe_dev_priv *priv);
177 static int mpipe_equeue_sizes[] = {
178 [GXIO_MPIPE_EQUEUE_ENTRY_512] = 512,
179 [GXIO_MPIPE_EQUEUE_ENTRY_2K] = 2048,
180 [GXIO_MPIPE_EQUEUE_ENTRY_8K] = 8192,
181 [GXIO_MPIPE_EQUEUE_ENTRY_64K] = 65536,
184 static int mpipe_iqueue_sizes[] = {
185 [GXIO_MPIPE_IQUEUE_ENTRY_128] = 128,
186 [GXIO_MPIPE_IQUEUE_ENTRY_512] = 512,
187 [GXIO_MPIPE_IQUEUE_ENTRY_2K] = 2048,
188 [GXIO_MPIPE_IQUEUE_ENTRY_64K] = 65536,
191 static int mpipe_buffer_sizes[] = {
192 [GXIO_MPIPE_BUFFER_SIZE_128] = 128,
193 [GXIO_MPIPE_BUFFER_SIZE_256] = 256,
194 [GXIO_MPIPE_BUFFER_SIZE_512] = 512,
195 [GXIO_MPIPE_BUFFER_SIZE_1024] = 1024,
196 [GXIO_MPIPE_BUFFER_SIZE_1664] = 1664,
197 [GXIO_MPIPE_BUFFER_SIZE_4096] = 4096,
198 [GXIO_MPIPE_BUFFER_SIZE_10368] = 10368,
199 [GXIO_MPIPE_BUFFER_SIZE_16384] = 16384,
202 static gxio_mpipe_context_t *
203 mpipe_context(int instance)
205 if (instance < 0 || instance >= mpipe_instances)
207 return &mpipe_contexts[instance].context;
210 static int mpipe_channel_config(int instance, int channel,
211 struct mpipe_channel_config *config)
213 struct mpipe_channel_config *data;
214 struct mpipe_context *context;
215 gxio_mpipe_rules_t rules;
218 if (instance < 0 || instance >= mpipe_instances ||
219 channel < 0 || channel >= MPIPE_MAX_CHANNELS)
222 context = &mpipe_contexts[instance];
224 rte_spinlock_lock(&context->lock);
226 gxio_mpipe_rules_init(&rules, &context->context);
228 for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
229 data = (channel == idx) ? config : &context->channels[idx];
234 rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
235 data->num_buckets, &data->stacks);
240 rc = gxio_mpipe_rules_add_channel(&rules, idx);
245 rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
251 rc = gxio_mpipe_rules_commit(&rules);
253 memcpy(&context->channels[channel], config, sizeof(*config));
257 rte_spinlock_unlock(&context->lock);
263 mpipe_get_size_index(int *array, int count, int size,
268 for (i = 0; i < count && array[i] < size; i++) {
274 return i < count ? (int)i : -ENOENT;
276 return last >= 0 ? last : -ENOENT;
280 mpipe_calc_size(int *array, int count, int size)
282 int index = mpipe_get_size_index(array, count, size, 1);
283 return index < 0 ? index : array[index];
286 static int mpipe_equeue_size(int size)
289 result = mpipe_calc_size(mpipe_equeue_sizes,
290 RTE_DIM(mpipe_equeue_sizes), size);
294 static int mpipe_iqueue_size(int size)
297 result = mpipe_calc_size(mpipe_iqueue_sizes,
298 RTE_DIM(mpipe_iqueue_sizes), size);
302 static int mpipe_buffer_size_index(int size)
305 result = mpipe_get_size_index(mpipe_buffer_sizes,
306 RTE_DIM(mpipe_buffer_sizes), size, 0);
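/*
 * The helpers above map a requested count or byte size onto one of the
 * hardware-supported values: mpipe_calc_size() rounds up and returns the
 * size itself, while mpipe_buffer_size_index() rounds down and returns the
 * table index.  Illustrative examples:
 *
 *	mpipe_equeue_size(512)        -> 512
 *	mpipe_equeue_size(1000)       -> 2048 (next supported equeue size)
 *	mpipe_buffer_size_index(1900) -> GXIO_MPIPE_BUFFER_SIZE_1664
 */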
311 mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
312 struct rte_eth_link *link)
314 struct rte_eth_link *dst = link;
315 struct rte_eth_link *src = &(dev->data->dev_link);
317 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
318 *(uint64_t *)src) == 0)
325 mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
326 struct rte_eth_link *link)
328 struct rte_eth_link *dst = &(dev->data->dev_link);
329 struct rte_eth_link *src = link;
331 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
332 *(uint64_t *)src) == 0)
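/*
 * struct rte_eth_link fits in a 64-bit word, so the two helpers above read
 * and write dev->data->dev_link atomically with a single
 * rte_atomic64_cmpset(), the same link-status pattern used by other PMDs of
 * this DPDK generation.
 */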
339 mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
340 struct rte_eth_dev_info *dev_info)
342 dev_info->min_rx_bufsize = 128;
343 dev_info->max_rx_pktlen = 1518;
344 dev_info->max_tx_queues = MPIPE_TX_MAX_QUEUES;
345 dev_info->max_rx_queues = MPIPE_RX_MAX_QUEUES;
346 dev_info->max_mac_addrs = 1;
347 dev_info->rx_offload_capa = 0;
348 dev_info->tx_offload_capa = 0;
352 mpipe_configure(struct rte_eth_dev *dev)
354 struct mpipe_dev_priv *priv = mpipe_priv(dev);
356 if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
357 RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
358 mpipe_name(priv), dev->data->nb_tx_queues,
359 MPIPE_TX_MAX_QUEUES);
362 priv->nb_tx_queues = dev->data->nb_tx_queues;
364 if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
365 RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
366 mpipe_name(priv), dev->data->nb_rx_queues,
367 MPIPE_RX_MAX_QUEUES);
369 priv->nb_rx_queues = dev->data->nb_rx_queues;
375 mpipe_link_compare(struct rte_eth_link *link1,
376 struct rte_eth_link *link2)
378 return (*(uint64_t *)link1 == *(uint64_t *)link2)
383 mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
385 struct mpipe_dev_priv *priv = mpipe_priv(dev);
386 struct rte_eth_link old, new;
387 int64_t state, speed;
390 memset(&old, 0, sizeof(old));
391 memset(&new, 0, sizeof(new));
392 mpipe_dev_atomic_read_link_status(dev, &old);
394 for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
395 if (!priv->initialized)
398 state = gxio_mpipe_link_get_attr(&priv->link,
399 GXIO_MPIPE_LINK_CURRENT_STATE);
403 speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
405 new.link_autoneg = (dev->data->dev_conf.link_speeds &
406 ETH_LINK_SPEED_AUTONEG);
407 if (speed == GXIO_MPIPE_LINK_1G) {
408 new.link_speed = ETH_SPEED_NUM_1G;
409 new.link_duplex = ETH_LINK_FULL_DUPLEX;
410 new.link_status = ETH_LINK_UP;
411 } else if (speed == GXIO_MPIPE_LINK_10G) {
412 new.link_speed = ETH_SPEED_NUM_10G;
413 new.link_duplex = ETH_LINK_FULL_DUPLEX;
414 new.link_status = ETH_LINK_UP;
417 rc = mpipe_link_compare(&old, &new);
418 if (rc == 0 || !wait_to_complete)
421 rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
424 mpipe_dev_atomic_write_link_status(dev, &new);
429 mpipe_set_link(struct rte_eth_dev *dev, int up)
431 struct mpipe_dev_priv *priv = mpipe_priv(dev);
434 rc = gxio_mpipe_link_set_attr(&priv->link,
435 GXIO_MPIPE_LINK_DESIRED_STATE,
436 up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
438 RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
439 mpipe_name(priv), up ? "up" : "down");
441 mpipe_link_update(dev, 0);
448 mpipe_set_link_up(struct rte_eth_dev *dev)
450 return mpipe_set_link(dev, 1);
454 mpipe_set_link_down(struct rte_eth_dev *dev)
456 return mpipe_set_link(dev, 0);
460 mpipe_dp_enter(struct mpipe_dev_priv *priv)
462 __insn_mtspr(SPR_DSTREAM_PF, 0);
463 rte_atomic32_inc(&priv->dp_count);
467 mpipe_dp_exit(struct mpipe_dev_priv *priv)
469 rte_atomic32_dec(&priv->dp_count);
473 mpipe_dp_wait(struct mpipe_dev_priv *priv)
475 while (rte_atomic32_read(&priv->dp_count) != 0) {
481 mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
483 return (mbuf->port < RTE_MAX_ETHPORTS) ?
484 mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
488 static inline struct rte_mbuf *
489 mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
492 void *va = gxio_mpipe_idesc_get_va(idesc);
493 uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
494 struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);
496 rte_pktmbuf_reset(mbuf);
497 mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
498 mbuf->port = in_port;
499 mbuf->data_len = size;
500 mbuf->pkt_len = size;
501 mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);
503 PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
504 mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);
510 mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
512 const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
513 void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);
515 gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
516 PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
517 mpipe_name(priv), mbuf, buf_addr, priv->stack);
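/*
 * Buffer layout assumed by the push/pop helpers (sketch):
 *
 *	| struct rte_mbuf | priv area | headroom | IP pad | packet data ... |
 *	^ mbuf                                            ^ address pushed
 *
 * The address handed to the buffer stack points MPIPE_RX_IP_ALIGN bytes past
 * the mbuf headroom; priv->rx_offset (computed in mpipe_rx_queue_setup()) is
 * the distance from the start of the mbuf to that address, which is how
 * mpipe_recv_mbuf() and mpipe_recv_flush_stack() recover the mbuf from a
 * hardware-provided virtual address.
 */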
521 mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
523 struct rte_mbuf *mbuf;
526 for (i = 0; i < count; i++) {
527 mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
530 mpipe_recv_push(priv, mbuf);
533 PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
537 mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
539 const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
540 uint8_t in_port = priv->port_id;
541 struct rte_mbuf *mbuf;
545 va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
548 mbuf = RTE_PTR_SUB(va, offset);
550 PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
551 mpipe_name(priv), mbuf, va);
553 mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
556 mbuf->port = in_port;
557 mbuf->packet_type = 0;
561 __rte_mbuf_raw_free(mbuf);
566 mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
568 size_t size = ms->hugepage_sz;
572 for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
573 rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
580 RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
581 mpipe_name(priv), ms->addr, rc);
583 RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
584 mpipe_name(priv), ms->addr,
585 RTE_PTR_ADD(ms->addr, ms->len - 1));
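/*
 * mpipe_recv_init() below walks every memseg and registers each hugepage
 * with the mPIPE instance (one gxio_mpipe_register_page() call per
 * hugepage), so the hardware can address the mbuf memory that is later
 * pushed onto the buffer stack.
 */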
590 mpipe_recv_init(struct mpipe_dev_priv *priv)
592 const struct rte_memseg *seg = rte_eal_get_physmem_layout();
597 if (!priv->rx_mpool) {
598 RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
603 /* Allocate one NotifRing for each queue. */
604 rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
607 RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
611 priv->first_ring = rc;
613 /* Allocate a NotifGroup. */
614 rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
616 RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
620 priv->notif_group = rc;
622 /* Allocate required buckets. */
623 rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
625 RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
629 priv->first_bucket = rc;
631 rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
633 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
639 while (seg && seg->addr)
640 mpipe_register_segment(priv, seg++);
642 stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
643 stack_mem = rte_zmalloc(NULL, stack_size, 65536);
645 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
649 RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
650 mpipe_name(priv), stack_mem,
651 RTE_PTR_ADD(stack_mem, stack_size - 1));
654 rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
655 priv->rx_size_code, stack_mem,
658 RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
667 mpipe_xmit_init(struct mpipe_dev_priv *priv)
673 /* Allocate eDMA ring. */
674 rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
676 RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
682 rc = mpipe_equeue_size(MPIPE_TX_DESCS);
684 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
685 mpipe_name(priv), (int)MPIPE_TX_DESCS);
688 priv->equeue_size = rc;
690 /* Initialize completion array. */
691 ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
692 priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
693 if (!priv->tx_comps) {
694 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
699 /* Allocate eDMA ring memory. */
700 ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
701 ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
703 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
707 RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
708 mpipe_name(priv), ring_mem,
709 RTE_PTR_ADD(ring_mem, ring_size - 1));
712 /* Initialize eDMA ring. */
713 rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
714 priv->channel, ring_mem, ring_size, 0);
716 RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
725 mpipe_link_init(struct mpipe_dev_priv *priv)
730 rc = gxio_mpipe_link_open(&priv->link, priv->context,
731 mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
733 RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
738 /* Get the channel index. */
739 rc = gxio_mpipe_link_channel(&priv->link);
741 RTE_LOG(ERR, PMD, "%s: Bad channel\n",
751 mpipe_init(struct mpipe_dev_priv *priv)
755 if (priv->initialized)
758 rc = mpipe_recv_init(priv);
760 RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
765 rc = mpipe_xmit_init(priv);
767 RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
773 priv->initialized = 1;
779 mpipe_start(struct rte_eth_dev *dev)
781 struct mpipe_dev_priv *priv = mpipe_priv(dev);
782 struct mpipe_channel_config config;
783 struct mpipe_rx_queue *rx_queue;
784 struct rte_eth_link eth_link;
785 unsigned queue, buffers = 0;
790 memset(ð_link, 0, sizeof(eth_link));
791 mpipe_dev_atomic_write_link_status(dev, ð_link);
793 rc = mpipe_init(priv);
797 /* Initialize NotifRings. */
798 for (queue = 0; queue < priv->nb_rx_queues; queue++) {
799 rx_queue = mpipe_rx_queue(priv, queue);
800 ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);
802 ring_mem = rte_malloc(NULL, ring_size, ring_size);
804 RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
808 RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
809 mpipe_name(priv), queue, ring_mem,
810 RTE_PTR_ADD(ring_mem, ring_size - 1));
813 rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
814 priv->first_ring + queue, ring_mem,
817 RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
822 rx_queue->rx_ring_mem = ring_mem;
823 buffers += rx_queue->q.nb_desc;
826 /* Initialize ingress NotifGroup and buckets. */
827 rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
828 priv->notif_group, priv->first_ring, priv->nb_rx_queues,
829 priv->first_bucket, MPIPE_RX_BUCKETS,
830 GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
832 RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
837 /* Configure the classifier to deliver packets from this port. */
839 config.first_bucket = priv->first_bucket;
840 config.num_buckets = MPIPE_RX_BUCKETS;
841 memset(&config.stacks, 0xff, sizeof(config.stacks));
842 config.stacks.stacks[priv->rx_size_code] = priv->stack;
843 config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;
845 rc = mpipe_channel_config(priv->instance, priv->channel,
848 RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
853 /* Fill the buffer stack with empty buffers. */
854 mpipe_recv_fill_stack(priv, buffers);
856 /* Bring up the link. */
857 mpipe_set_link_up(dev);
859 /* Start xmit/recv on queues. */
860 for (queue = 0; queue < priv->nb_tx_queues; queue++)
861 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
862 for (queue = 0; queue < priv->nb_rx_queues; queue++)
863 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
870 mpipe_stop(struct rte_eth_dev *dev)
872 struct mpipe_dev_priv *priv = mpipe_priv(dev);
873 struct mpipe_channel_config config;
877 for (queue = 0; queue < priv->nb_tx_queues; queue++)
878 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
879 for (queue = 0; queue < priv->nb_rx_queues; queue++)
880 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
882 /* Make sure the link_status writes land. */
886 * Wait for link_status change to register with straggling datapath
891 /* Bring down the link. */
892 mpipe_set_link_down(dev);
894 /* Remove classifier rules. */
895 memset(&config, 0, sizeof(config));
896 rc = mpipe_channel_config(priv->instance, priv->channel,
899 RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
903 /* Flush completed xmit packets. */
904 mpipe_xmit_flush(priv);
906 /* Flush buffer stacks. */
907 mpipe_recv_flush(priv);
913 mpipe_close(struct rte_eth_dev *dev)
915 struct mpipe_dev_priv *priv = mpipe_priv(dev);
921 mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
923 struct mpipe_dev_priv *priv = mpipe_priv(dev);
924 struct mpipe_tx_queue *tx_queue;
925 struct mpipe_rx_queue *rx_queue;
929 memset(stats, 0, sizeof(*stats));
931 for (i = 0; i < priv->nb_tx_queues; i++) {
932 tx_queue = mpipe_tx_queue(priv, i);
934 stats->opackets += tx_queue->q.stats.packets;
935 stats->obytes += tx_queue->q.stats.bytes;
936 stats->oerrors += tx_queue->q.stats.errors;
938 idx = tx_queue->q.stat_idx;
939 if (idx != (uint16_t)-1) {
940 stats->q_opackets[idx] += tx_queue->q.stats.packets;
941 stats->q_obytes[idx] += tx_queue->q.stats.bytes;
942 stats->q_errors[idx] += tx_queue->q.stats.errors;
946 for (i = 0; i < priv->nb_rx_queues; i++) {
947 rx_queue = mpipe_rx_queue(priv, i);
949 stats->ipackets += rx_queue->q.stats.packets;
950 stats->ibytes += rx_queue->q.stats.bytes;
951 stats->ierrors += rx_queue->q.stats.errors;
952 stats->rx_nombuf += rx_queue->q.stats.nomem;
954 idx = rx_queue->q.stat_idx;
955 if (idx != (uint16_t)-1) {
956 stats->q_ipackets[idx] += rx_queue->q.stats.packets;
957 stats->q_ibytes[idx] += rx_queue->q.stats.bytes;
958 stats->q_errors[idx] += rx_queue->q.stats.errors;
964 mpipe_stats_reset(struct rte_eth_dev *dev)
966 struct mpipe_dev_priv *priv = mpipe_priv(dev);
967 struct mpipe_tx_queue *tx_queue;
968 struct mpipe_rx_queue *rx_queue;
971 for (i = 0; i < priv->nb_tx_queues; i++) {
972 tx_queue = mpipe_tx_queue(priv, i);
973 memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
976 for (i = 0; i < priv->nb_rx_queues; i++) {
977 rx_queue = mpipe_rx_queue(priv, i);
978 memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
983 mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
984 uint8_t stat_idx, uint8_t is_rx)
986 struct mpipe_dev_priv *priv = mpipe_priv(dev);
989 priv->rx_stat_mapping[stat_idx] = queue_id;
991 priv->tx_stat_mapping[stat_idx] = queue_id;
998 mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
999 uint16_t nb_desc, unsigned int socket_id __rte_unused,
1000 const struct rte_eth_txconf *tx_conf __rte_unused)
1002 struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
1003 struct mpipe_dev_priv *priv = mpipe_priv(dev);
1006 tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
1007 RTE_CACHE_LINE_SIZE);
1009 RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
1014 memset(&tx_queue->q, 0, sizeof(tx_queue->q));
1015 tx_queue->q.priv = priv;
1016 tx_queue->q.queue_idx = queue_idx;
1017 tx_queue->q.port_id = dev->data->port_id;
1018 tx_queue->q.nb_desc = nb_desc;
1020 tx_queue->q.stat_idx = -1;
1021 for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1022 if (priv->tx_stat_mapping[idx] == queue_idx)
1023 tx_queue->q.stat_idx = idx;
1026 dev->data->tx_queues[queue_idx] = tx_queue;
1032 mpipe_tx_queue_release(void *_txq)
1038 mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1039 uint16_t nb_desc, unsigned int socket_id __rte_unused,
1040 const struct rte_eth_rxconf *rx_conf __rte_unused,
1041 struct rte_mempool *mp)
1043 struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
1044 struct mpipe_dev_priv *priv = mpipe_priv(dev);
1048 rc = mpipe_iqueue_size(nb_desc);
1050 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
1051 mpipe_name(priv), (int)nb_desc);
1055 if (rc != nb_desc) {
1056 RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
1057 mpipe_name(priv), (int)nb_desc, rc);
1061 size = sizeof(*rx_queue);
1062 rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
1064 RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
1069 memset(&rx_queue->q, 0, sizeof(rx_queue->q));
1070 rx_queue->q.priv = priv;
1071 rx_queue->q.nb_desc = nb_desc;
1072 rx_queue->q.port_id = dev->data->port_id;
1073 rx_queue->q.queue_idx = queue_idx;
1075 if (!priv->rx_mpool) {
1076 int size = (rte_pktmbuf_data_room_size(mp) -
1077 RTE_PKTMBUF_HEADROOM -
1080 priv->rx_offset = (sizeof(struct rte_mbuf) +
1081 rte_pktmbuf_priv_size(mp) +
1082 RTE_PKTMBUF_HEADROOM +
1085 RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
1087 rte_pktmbuf_data_room_size(mp));
1091 priv->rx_size_code = mpipe_buffer_size_index(size);
1092 priv->rx_mpool = mp;
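/*
 * Worked example of the sizing above, assuming a pool created with the
 * default RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes) and a 128-byte
 * RTE_PKTMBUF_HEADROOM: the usable size is the data room minus the headroom
 * and the 2-byte IP pad, i.e. 2176 - 128 - 2 = 2046, which
 * mpipe_buffer_size_index() rounds down to the 1664-byte hardware buffer
 * size, while rx_offset spans the mbuf header, private area, headroom and
 * the IP alignment pad.
 */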
1095 if (priv->rx_mpool != mp) {
1096 RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
1100 rx_queue->q.stat_idx = -1;
1101 for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1102 if (priv->rx_stat_mapping[idx] == queue_idx)
1103 rx_queue->q.stat_idx = idx;
1106 dev->data->rx_queues[queue_idx] = rx_queue;
1112 mpipe_rx_queue_release(void *_rxq)
1117 #define MPIPE_XGBE_ENA_HASH_MULTI \
1118 (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
1119 #define MPIPE_XGBE_ENA_HASH_UNI \
1120 (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
1121 #define MPIPE_XGBE_COPY_ALL \
1122 (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
1123 #define MPIPE_GBE_ENA_MULTI_HASH \
1124 (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
1125 #define MPIPE_GBE_ENA_UNI_HASH \
1126 (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
1127 #define MPIPE_GBE_COPY_ALL \
1128 (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)
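/*
 * Promiscuous mode is implemented by programming the MAC directly through
 * gxio_mpipe_link_mac_rd()/wr(): enabling it sets the "copy all" bit and
 * clears the unicast/multicast hash-filter enables, and disabling it
 * restores the hash filters.  The register layout differs between the XAUI
 * (10 GbE) and GbE MACs, hence the two sets of bit definitions above.
 */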
1131 mpipe_promiscuous_enable(struct rte_eth_dev *dev)
1133 struct mpipe_dev_priv *priv = mpipe_priv(dev);
1137 if (priv->is_xaui) {
1138 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1139 reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
1140 reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
1141 reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
1142 reg |= MPIPE_XGBE_COPY_ALL;
1143 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1145 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1146 reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
1147 reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
1148 reg &= ~MPIPE_GBE_ENA_UNI_HASH;
1149 reg |= MPIPE_GBE_COPY_ALL;
1150 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1155 mpipe_promiscuous_disable(struct rte_eth_dev *dev)
1157 struct mpipe_dev_priv *priv = mpipe_priv(dev);
1161 if (priv->is_xaui) {
1162 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1163 reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
1164 reg |= MPIPE_XGBE_ENA_HASH_MULTI;
1165 reg |= MPIPE_XGBE_ENA_HASH_UNI;
1166 reg &= ~MPIPE_XGBE_COPY_ALL;
1167 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1169 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1170 reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
1171 reg |= MPIPE_GBE_ENA_MULTI_HASH;
1172 reg |= MPIPE_GBE_ENA_UNI_HASH;
1173 reg &= ~MPIPE_GBE_COPY_ALL;
1174 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1178 static const struct eth_dev_ops mpipe_dev_ops = {
1179 .dev_infos_get = mpipe_infos_get,
1180 .dev_configure = mpipe_configure,
1181 .dev_start = mpipe_start,
1182 .dev_stop = mpipe_stop,
1183 .dev_close = mpipe_close,
1184 .stats_get = mpipe_stats_get,
1185 .stats_reset = mpipe_stats_reset,
1186 .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
1187 .tx_queue_setup = mpipe_tx_queue_setup,
1188 .rx_queue_setup = mpipe_rx_queue_setup,
1189 .tx_queue_release = mpipe_tx_queue_release,
1190 .rx_queue_release = mpipe_rx_queue_release,
1191 .link_update = mpipe_link_update,
1192 .dev_set_link_up = mpipe_set_link_up,
1193 .dev_set_link_down = mpipe_set_link_down,
1194 .promiscuous_enable = mpipe_promiscuous_enable,
1195 .promiscuous_disable = mpipe_promiscuous_disable,
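/*
 * mpipe_xmit_null() below fills a range of reserved equeue slots with
 * "null" descriptors (bound set, ns presumably meaning "no send"), letting
 * the TX path hand back slots it reserved but did not use without
 * transmitting anything.
 */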
1199 mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
1201 gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
1202 gxio_mpipe_equeue_t *equeue = &priv->equeue;
1205 for (slot = start; slot < end; slot++) {
1206 gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
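/*
 * mpipe_xmit_flush() drains the egress path by posting a single null
 * descriptor and spinning until that slot is reported complete; the eDMA
 * ring is consumed in order, so by then every earlier descriptor has
 * finished and the mbufs parked in tx_comps[] can be freed safely.
 */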
1211 mpipe_xmit_flush(struct mpipe_dev_priv *priv)
1213 gxio_mpipe_equeue_t *equeue = &priv->equeue;
1216 /* Post a dummy descriptor and wait for its return. */
1217 slot = gxio_mpipe_equeue_reserve(equeue, 1);
1219 RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
1224 mpipe_xmit_null(priv, slot, slot + 1);
1226 while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
1230 for (slot = 0; slot < priv->equeue_size; slot++) {
1231 if (priv->tx_comps[slot])
1232 rte_pktmbuf_free_seg(priv->tx_comps[slot]);
1237 mpipe_recv_flush(struct mpipe_dev_priv *priv)
1239 uint8_t in_port = priv->port_id;
1240 struct mpipe_rx_queue *rx_queue;
1241 gxio_mpipe_iqueue_t *iqueue;
1242 gxio_mpipe_idesc_t idesc;
1243 struct rte_mbuf *mbuf;
1246 /* Release packets on the buffer stack. */
1247 mpipe_recv_flush_stack(priv);
1249 /* Flush packets sitting in recv queues. */
1250 for (queue = 0; queue < priv->nb_rx_queues; queue++) {
1251 rx_queue = mpipe_rx_queue(priv, queue);
1252 iqueue = &rx_queue->iqueue;
1253 while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
1254 /* Skip idesc with the 'buffer error' bit set. */
1257 mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
1258 rte_pktmbuf_free(mbuf);
1260 rte_free(rx_queue->rx_ring_mem);
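/*
 * mpipe_do_xmit() below completes transmits lazily: each sent mbuf is
 * remembered in priv->tx_comps[] under its equeue slot and freed only when
 * that slot is reused (or when mpipe_xmit_flush() drains the ring), so no
 * completion interrupt or separate cleanup pass is needed.
 */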
1264 static inline uint16_t
1265 mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
1268 struct mpipe_dev_priv *priv = tx_queue->q.priv;
1269 gxio_mpipe_equeue_t *equeue = &priv->equeue;
1270 unsigned nb_bytes = 0;
1271 unsigned nb_sent = 0;
1275 PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
1276 nb_pkts, mpipe_name(tx_queue->q.priv),
1277 tx_queue->q.queue_idx);
1279 /* Optimistic assumption that we need exactly one slot per packet. */
1280 nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);
1283 struct rte_mbuf *mbuf = NULL, *pkt = NULL;
1286 /* Reserve eDMA ring slots. */
1287 slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
1288 if (unlikely(slot < 0)) {
1292 for (i = 0; i < nb_slots; i++) {
1293 unsigned idx = (slot + i) & (priv->equeue_size - 1);
1294 rte_prefetch0(priv->tx_comps[idx]);
1297 /* Fill up slots with descriptor and completion info. */
1298 for (i = 0; i < nb_slots; i++) {
1299 unsigned idx = (slot + i) & (priv->equeue_size - 1);
1300 gxio_mpipe_edesc_t desc;
1301 struct rte_mbuf *next;
1303 /* Starting on a new packet? */
1304 if (likely(!mbuf)) {
1305 int room = nb_slots - i;
1307 pkt = mbuf = tx_pkts[nb_sent];
1309 /* Bail out if we run out of descs. */
1310 if (unlikely(pkt->nb_segs > room))
1316 /* We have a segment to send. */
1319 if (priv->tx_comps[idx])
1320 rte_pktmbuf_free_seg(priv->tx_comps[idx]);
1322 port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
1323 mbuf->port : priv->port_id;
1324 desc = (gxio_mpipe_edesc_t) { {
1325 .va = rte_pktmbuf_mtod(mbuf, uintptr_t),
1326 .xfer_size = rte_pktmbuf_data_len(mbuf),
1327 .bound = next ? 0 : 1,
1328 .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
1329 .size = priv->rx_size_code,
1331 if (mpipe_local.mbuf_push_debt[port_id] > 0) {
1332 mpipe_local.mbuf_push_debt[port_id]--;
1334 priv->tx_comps[idx] = NULL;
1336 priv->tx_comps[idx] = mbuf;
1338 nb_bytes += mbuf->data_len;
1339 gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
1341 PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
1343 tx_queue->q.queue_idx,
1344 rte_pktmbuf_mtod(mbuf, void *),
1345 rte_pktmbuf_data_len(mbuf));
1350 if (unlikely(nb_sent < nb_pkts)) {
1352 /* Fill remaining slots with null descriptors. */
1353 mpipe_xmit_null(priv, slot + i, slot + nb_slots);
1356 * Calculate exact number of descriptors needed for
1357 * the next go around.
1360 for (i = nb_sent; i < nb_pkts; i++) {
1361 nb_slots += tx_pkts[i]->nb_segs;
1364 nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
1366 } while (nb_sent < nb_pkts);
1368 tx_queue->q.stats.packets += nb_sent;
1369 tx_queue->q.stats.bytes += nb_bytes;
1374 static inline uint16_t
1375 mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
1378 struct mpipe_dev_priv *priv = rx_queue->q.priv;
1379 gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
1380 gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
1381 uint8_t in_port = rx_queue->q.port_id;
1382 const unsigned look_ahead = 8;
1383 int room = nb_pkts, rc = 0;
1384 unsigned nb_packets = 0;
1385 unsigned nb_dropped = 0;
1386 unsigned nb_nomem = 0;
1387 unsigned nb_bytes = 0;
1388 unsigned nb_descs, i;
1390 while (room && !rc) {
1391 if (rx_queue->avail_descs < room) {
1392 rc = gxio_mpipe_iqueue_try_peek(iqueue,
1393 &rx_queue->next_desc);
1394 rx_queue->avail_descs = rc < 0 ? 0 : rc;
1397 if (unlikely(!rx_queue->avail_descs)) {
1401 nb_descs = RTE_MIN(room, rx_queue->avail_descs);
1403 first_idesc = rx_queue->next_desc;
1404 last_idesc = first_idesc + nb_descs;
1406 rx_queue->next_desc += nb_descs;
1407 rx_queue->avail_descs -= nb_descs;
1409 for (i = 1; i < look_ahead; i++) {
1410 rte_prefetch0(first_idesc + i);
1413 PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
1414 mpipe_name(rx_queue->q.priv),
1415 rx_queue->q.queue_idx,
1418 for (idesc = first_idesc; idesc < last_idesc; idesc++) {
1419 struct rte_mbuf *mbuf;
1421 PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
1423 rx_queue->q.queue_idx,
1424 nb_packets, nb_descs);
1426 rte_prefetch0(idesc + look_ahead);
1428 PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
1429 "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
1431 rx_queue->q.queue_idx,
1433 idesc->me ? "me, " : "",
1434 idesc->tr ? "tr, " : "",
1435 idesc->ce ? "ce, " : "",
1436 idesc->ct ? "ct, " : "",
1437 idesc->cs ? "cs, " : "",
1438 idesc->nr ? "nr, " : "",
1439 idesc->sq ? "sq, " : "",
1440 idesc->ts ? "ts, " : "",
1441 idesc->ps ? "ps, " : "",
1442 idesc->be ? "be, " : "",
1447 (unsigned long)idesc->packet_sqn,
1448 (unsigned long)idesc->va);
1450 if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
1452 gxio_mpipe_iqueue_drop(iqueue, idesc);
1453 PMD_DEBUG_RX("%s:%d: Descriptor error\n",
1454 mpipe_name(rx_queue->q.priv),
1455 rx_queue->q.queue_idx);
1459 if (mpipe_local.mbuf_push_debt[in_port] <
1460 MPIPE_BUF_DEBT_THRESHOLD)
1461 mpipe_local.mbuf_push_debt[in_port]++;
1463 mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
1464 if (unlikely(!mbuf)) {
1466 gxio_mpipe_iqueue_drop(iqueue, idesc);
1467 PMD_DEBUG_RX("%s:%d: alloc failure\n",
1468 mpipe_name(rx_queue->q.priv),
1469 rx_queue->q.queue_idx);
1473 mpipe_recv_push(priv, mbuf);
1476 /* Get and set up the mbuf for the received packet. */
1477 mbuf = mpipe_recv_mbuf(priv, idesc, in_port);
1479 /* Update results and statistics counters. */
1480 rx_pkts[nb_packets] = mbuf;
1481 nb_bytes += mbuf->pkt_len;
1486 * We release the ring in bursts, but do not track and release
1487 * buckets. This therefore breaks dynamic flow affinity, but
1488 * we always operate in static affinity mode, and so we're OK
1489 * with this optimization.
1491 gxio_mpipe_iqueue_advance(iqueue, nb_descs);
1492 gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);
1495 * Go around once more if we haven't yet peeked the queue, and
1496 * if we have more room to receive.
1498 room = nb_pkts - nb_packets;
1501 rx_queue->q.stats.packets += nb_packets;
1502 rx_queue->q.stats.bytes += nb_bytes;
1503 rx_queue->q.stats.errors += nb_dropped;
1504 rx_queue->q.stats.nomem += nb_nomem;
1506 PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
1507 mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
1508 nb_packets, nb_bytes, nb_dropped, nb_nomem);
1514 mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1516 struct mpipe_rx_queue *rx_queue = _rxq;
1517 uint16_t result = 0;
1520 mpipe_dp_enter(rx_queue->q.priv);
1521 if (likely(rx_queue->q.link_status))
1522 result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
1523 mpipe_dp_exit(rx_queue->q.priv);
1530 mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1532 struct mpipe_tx_queue *tx_queue = _txq;
1533 uint16_t result = 0;
1536 mpipe_dp_enter(tx_queue->q.priv);
1537 if (likely(tx_queue->q.link_status))
1538 result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
1539 mpipe_dp_exit(tx_queue->q.priv);
1546 mpipe_link_mac(const char *ifname, uint8_t *mac)
1549 char name[GXIO_MPIPE_LINK_NAME_LEN];
1551 for (idx = 0, rc = 0; !rc; idx++) {
1552 rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
1553 if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
1560 rte_pmd_mpipe_probe_common(struct rte_vdev_driver *drv, const char *ifname,
1561 const char *params __rte_unused)
1563 gxio_mpipe_context_t *context;
1564 struct rte_eth_dev *eth_dev;
1565 struct mpipe_dev_priv *priv;
1569 /* Get the mPIPE instance that the device belongs to. */
1570 instance = gxio_mpipe_link_instance(ifname);
1571 context = mpipe_context(instance);
1573 RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
1577 priv = rte_zmalloc(NULL, sizeof(*priv), 0);
1579 RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
1583 memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
1584 memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
1585 priv->context = context;
1586 priv->instance = instance;
1587 priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
1590 mac = priv->mac_addr.addr_bytes;
1591 rc = mpipe_link_mac(ifname, mac);
1593 RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
1598 eth_dev = rte_eth_dev_allocate(ifname);
1600 RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
1605 RTE_LOG(INFO, PMD, "%s: Initialized mpipe device "
1606 "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
1607 ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
1609 priv->eth_dev = eth_dev;
1610 priv->port_id = eth_dev->data->port_id;
1611 eth_dev->data->dev_private = priv;
1612 eth_dev->data->mac_addrs = &priv->mac_addr;
1614 eth_dev->data->kdrv = RTE_KDRV_NONE;
1615 eth_dev->driver = NULL;
1616 eth_dev->data->drv_name = drv->driver.name;
1617 eth_dev->data->numa_node = instance;
1619 eth_dev->dev_ops = &mpipe_dev_ops;
1620 eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
1621 eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;
1623 rc = mpipe_link_init(priv);
1625 RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
1633 static int rte_pmd_mpipe_xgbe_probe(const char *ifname, const char *params);
1634 static int rte_pmd_mpipe_gbe_probe(const char *ifname, const char *params);
1636 static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
1637 .probe = rte_pmd_mpipe_xgbe_probe,
1640 static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
1641 .probe = rte_pmd_mpipe_gbe_probe,
1645 rte_pmd_mpipe_xgbe_probe(const char *ifname, const char *params __rte_unused)
1647 return rte_pmd_mpipe_probe_common(&pmd_mpipe_xgbe_drv, ifname, params);
1651 rte_pmd_mpipe_gbe_probe(const char *ifname, const char *params __rte_unused)
1653 return rte_pmd_mpipe_probe_common(&pmd_mpipe_gbe_drv, ifname, params);
1656 RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
1657 RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
1658 RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
1659 RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);
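/*
 * One vdev driver is registered per MAC flavour.  A port is created by
 * naming the mPIPE link on the EAL command line, e.g. something like
 * "--vdev=xgbe0" or "--vdev=gbe0" (assumed usage; the name is passed as
 * ifname to gxio_mpipe_link_instance() and gxio_mpipe_link_open() above).
 */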
1661 static void __attribute__((constructor, used))
1662 mpipe_init_contexts(void)
1664 struct mpipe_context *context;
1667 for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
1668 context = &mpipe_contexts[instance];
1670 rte_spinlock_init(&context->lock);
1671 rc = gxio_mpipe_init(&context->context, instance);
1676 mpipe_instances = instance;