X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fbase%2Fvnic_rq.h;h=cfe65015d5fec601590d8145d420e2f50bed07f1;hb=4d4e245ad637c0befbd6da9976c28174a1d74b88;hp=54b6612362030bfce992061cfa8c58f278797d8c;hpb=72f3de308fce9d6b537440f5f1a414c069d2c79d;p=dpdk.git

diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 54b6612362..cfe65015d5 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -1,108 +1,78 @@
-/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
  * Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * Copyright (c) 2014, Cisco Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
  */
-
-#ident "$Id: vnic_rq.h 180262 2014-07-02 07:57:43Z gvaradar $"
 
 #ifndef _VNIC_RQ_H_
 #define _VNIC_RQ_H_
 
+#include
 #include "vnic_dev.h"
 #include "vnic_cq.h"
 
 /* Receive queue control */
 struct vnic_rq_ctrl {
- u64 ring_base; /* 0x00 */
- u32 ring_size; /* 0x08 */
- u32 pad0;
- u32 posted_index; /* 0x10 */
- u32 pad1;
- u32 cq_index; /* 0x18 */
- u32 pad2;
- u32 enable; /* 0x20 */
- u32 pad3;
- u32 running; /* 0x28 */
- u32 pad4;
- u32 fetch_index; /* 0x30 */
- u32 pad5;
- u32 error_interrupt_enable; /* 0x38 */
- u32 pad6;
- u32 error_interrupt_offset; /* 0x40 */
- u32 pad7;
- u32 error_status; /* 0x48 */
- u32 pad8;
- u32 dropped_packet_count; /* 0x50 */
- u32 pad9;
- u32 dropped_packet_count_rc; /* 0x58 */
- u32 pad10;
-};
-
-/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
-#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
- ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
- VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_RQ_BUF_BLK_SZ(entries) \
- (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
-#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
- DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
-
-struct vnic_rq_buf {
- struct vnic_rq_buf *next;
- dma_addr_t dma_addr;
- void *os_buf;
- unsigned int os_buf_index;
- unsigned int len;
- unsigned int index;
- void *desc;
- uint64_t wr_id;
+ uint64_t ring_base; /* 0x00 */
+ uint32_t ring_size; /* 0x08 */
+ uint32_t pad0;
+ uint32_t posted_index; /* 0x10 */
+ uint32_t pad1;
+ uint32_t cq_index; /* 0x18 */
+ uint32_t pad2;
+ uint32_t enable; /* 0x20 */
+ uint32_t pad3;
+ uint32_t running; /* 0x28 */
+ uint32_t pad4;
+ uint32_t fetch_index; /* 0x30 */
+ uint32_t pad5;
+ uint32_t error_interrupt_enable; /* 0x38 */
+ uint32_t pad6;
+ uint32_t error_interrupt_offset; /* 0x40 */
+ uint32_t pad7;
+ uint32_t error_status; /* 0x48 */
+ uint32_t pad8;
+ uint32_t tcp_sn; /* 0x50 */
+ uint32_t pad9;
+ uint32_t unused; /* 0x58 */
+ uint32_t pad10;
+ uint32_t dca_select; /* 0x60 */
+ uint32_t pad11;
+ uint32_t dca_value; /* 0x68 */
+ uint32_t pad12;
+ uint32_t data_ring; /* 0x70 */
+ uint32_t pad13;
+ uint32_t header_split; /* 0x78 */
+ uint32_t pad14;
 };
 
 struct vnic_rq {
 unsigned int index;
+ unsigned int posted_index;
 struct vnic_dev *vdev;
- struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
 struct vnic_dev_ring ring;
- struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
- struct vnic_rq_buf *to_use;
- struct vnic_rq_buf *to_clean;
+ struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+ int num_free_mbufs;
+ struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
+ unsigned int mbuf_next_idx; /* next mb to consume */
 void *os_buf_head;
 unsigned int pkts_outstanding;
-
+ uint16_t rx_nb_hold;
+ uint16_t rx_free_thresh;
 unsigned int socket_id;
 struct rte_mempool *mp;
+ uint16_t rxst_idx;
+ uint32_t tot_pkts;
+ uint16_t data_queue_idx;
+ uint8_t data_queue_enable;
+ uint8_t is_sop;
+ uint8_t in_use;
+ struct rte_mbuf *pkt_first_seg;
+ struct rte_mbuf *pkt_last_seg;
+ unsigned int max_mbufs_per_pkt;
+ uint16_t tot_nb_desc;
+ bool need_initial_post;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -117,119 +87,13 @@ static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
 return rq->ring.desc_count - rq->ring.desc_avail - 1;
 }
 
-static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-{
- return rq->to_use->desc;
-}
-
-static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-{
- return rq->to_use->index;
-}
-
-static inline void vnic_rq_post(struct vnic_rq *rq,
- void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len,
- uint64_t wrid)
-{
- struct vnic_rq_buf *buf = rq->to_use;
-
- buf->os_buf = os_buf;
- buf->os_buf_index = os_buf_index;
- buf->dma_addr = dma_addr;
- buf->len = len;
- buf->wr_id = wrid;
-
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
-
- /* Move the posted_index every nth descriptor
- */
-
-#ifndef VNIC_RQ_RETURN_RATE
-#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
-#endif
-
- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */ - wmb(); - iowrite32(buf->index, &rq->ctrl->posted_index); - } -} - -static inline void vnic_rq_post_commit(struct vnic_rq *rq, - void *os_buf, unsigned int os_buf_index, - dma_addr_t dma_addr, unsigned int len) -{ - struct vnic_rq_buf *buf = rq->to_use; - - buf->os_buf = os_buf; - buf->os_buf_index = os_buf_index; - buf->dma_addr = dma_addr; - buf->len = len; - - buf = buf->next; - rq->to_use = buf; - rq->ring.desc_avail--; - - /* Move the posted_index every descriptor - */ - - /* Adding write memory barrier prevents compiler and/or CPU - * reordering, thus avoiding descriptor posting before - * descriptor is initialized. Otherwise, hardware can read - * stale descriptor fields. - */ - wmb(); - iowrite32(buf->index, &rq->ctrl->posted_index); -} -static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) -{ - rq->ring.desc_avail += count; -} enum desc_return_options { VNIC_RQ_RETURN_DESC, VNIC_RQ_DEFER_RETURN_DESC, }; -static inline int vnic_rq_service(struct vnic_rq *rq, - struct cq_desc *cq_desc, u16 completed_index, - int desc_return, int (*buf_service)(struct vnic_rq *rq, - struct cq_desc *cq_desc, struct vnic_rq_buf *buf, - int skipped, void *opaque), void *opaque) -{ - struct vnic_rq_buf *buf; - int skipped; - int eop = 0; - - buf = rq->to_clean; - while (1) { - - skipped = (buf->index != completed_index); - - if ((*buf_service)(rq, cq_desc, buf, skipped, opaque)) - eop++; - - if (desc_return == VNIC_RQ_RETURN_DESC) - rq->ring.desc_avail++; - - rq->to_clean = buf->next; - - if (!skipped) - break; - - buf = rq->to_clean; - } - return eop; -} - static inline int vnic_rq_fill(struct vnic_rq *rq, int (*buf_fill)(struct vnic_rq *rq)) { @@ -275,8 +139,5 @@ unsigned int vnic_rq_error_status(struct vnic_rq *rq); void vnic_rq_enable(struct vnic_rq *rq); int vnic_rq_disable(struct vnic_rq *rq); void vnic_rq_clean(struct vnic_rq *rq, - void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)); -int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count, - unsigned int desc_size); - + void (*buf_clean)(struct rte_mbuf **buf)); #endif /* _VNIC_RQ_H_ */
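
Note on the removed helpers: the old vnic_rq_post()/vnic_rq_post_commit() inlines deleted above carried the only in-tree statement of the posting rule, namely that a receive descriptor must be fully written before posted_index is updated, with a write memory barrier in between, otherwise the hardware can read stale descriptor fields. The sketch below restates that rule with the generic DPDK mbuf and I/O accessors. It is illustrative only and is not the enic PMD's actual refill path; the descriptor layout (rq_desc_sketch) and the helper name are made up for the example, while rte_pktmbuf_alloc(), rte_mbuf_data_iova_default() and rte_write32() are standard DPDK APIs.

#include <stdint.h>

#include <rte_io.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Hypothetical 16-byte RQ descriptor, for illustration only. */
struct rq_desc_sketch {
	uint64_t address;        /* IOVA of the mbuf data buffer */
	uint16_t length;         /* usable buffer length in bytes */
	uint16_t reserved[3];
};

/*
 * Fill nb_desc descriptors with fresh mbufs from mp, remember each mbuf
 * in mbuf_ring so the completion path can hand it to the application,
 * then tell the NIC by writing the index of the last valid descriptor
 * to the posted_index register.
 */
static int
rq_refill_sketch(struct rte_mempool *mp, struct rq_desc_sketch *descs,
		 struct rte_mbuf **mbuf_ring, uint16_t nb_desc,
		 volatile uint32_t *posted_index)
{
	uint16_t i;

	for (i = 0; i < nb_desc; i++) {
		struct rte_mbuf *mb = rte_pktmbuf_alloc(mp);

		if (mb == NULL)
			return -1;   /* caller unwinds what was filled */

		mbuf_ring[i] = mb;
		descs[i].address = rte_mbuf_data_iova_default(mb);
		descs[i].length = rte_pktmbuf_tailroom(mb);
	}

	/*
	 * rte_write32() issues an I/O write barrier before the store, so
	 * every descriptor written above is visible to the device before
	 * the new posted_index lands in the BAR register.
	 */
	rte_write32(nb_desc - 1, posted_index);
	return 0;
}

rte_write32() already performs an I/O write barrier before the store, which is why no explicit rte_io_wmb() appears before the posted_index update; using rte_write32_relaxed() instead would require adding that barrier by hand, mirroring the wmb()/iowrite32() pair in the removed code.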