/*
 * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
#include "vnic_rss.h"

static inline int enic_is_sriov_vf(struct enic *enic)
{
        return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
        return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
        return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
        return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
        uint16_t i;

        if (!rq || !rq->mbuf_ring) {
                dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
                return;
        }

        for (i = 0; i < rq->ring.desc_count; i++) {
                if (rq->mbuf_ring[i]) {
                        rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
                        rq->mbuf_ring[i] = NULL;
                }
        }
}

void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
        vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
        struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

        rte_pktmbuf_free_seg(mbuf);
        buf->mb = NULL;
}

static void enic_log_q_error(struct enic *enic)
{
        unsigned int i;
        u32 error_status;

        for (i = 0; i < enic->wq_count; i++) {
                error_status = vnic_wq_error_status(&enic->wq[i]);
                if (error_status)
                        dev_err(enic, "WQ[%d] error_status %d\n", i,
                                error_status);
        }

        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (!enic->rq[i].in_use)
                        continue;
                error_status = vnic_rq_error_status(&enic->rq[i]);
                if (error_status)
                        dev_err(enic, "RQ[%d] error_status %d\n", i,
                                error_status);
        }
}

static void enic_clear_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_clear(&soft_stats->rx_nombuf);
        rte_atomic64_clear(&soft_stats->rx_packet_errors);
}

static void enic_init_soft_stats(struct enic *enic)
{
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        rte_atomic64_init(&soft_stats->rx_nombuf);
        rte_atomic64_init(&soft_stats->rx_packet_errors);
        enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
        if (vnic_dev_stats_clear(enic->vdev))
                dev_err(enic, "Error in clearing stats\n");
        enic_clear_soft_stats(enic);
}

void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
        struct vnic_stats *stats;
        struct enic_soft_stats *soft_stats = &enic->soft_stats;
        int64_t rx_truncated;
        uint64_t rx_packet_errors;

        if (vnic_dev_stats_dump(enic->vdev, &stats)) {
                dev_err(enic, "Error in getting stats\n");
                return;
        }

        /* The number of truncated packets can only be calculated by
         * subtracting a hardware counter from error packets received by
         * the driver. Note: this causes transient inaccuracies in the
         * ipackets count. Also, the length of truncated packets is
         * counted in ibytes even though truncated packets are dropped,
         * which can make ibytes slightly higher than it should be.
         */
        rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
        rx_truncated = rx_packet_errors - stats->rx.rx_errors -
                stats->rx.rx_no_bufs;

        r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
        r_stats->opackets = stats->tx.tx_frames_ok;

        r_stats->ibytes = stats->rx.rx_bytes_ok;
        r_stats->obytes = stats->tx.tx_bytes_ok;

        r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
        r_stats->oerrors = stats->tx.tx_errors;

        r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

        r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}

void enic_del_mac_address(struct enic *enic)
{
        if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
                dev_err(enic, "del mac addr failed\n");
}

void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
        int err;

        if (!is_eth_addr_valid(mac_addr)) {
                dev_err(enic, "invalid mac address\n");
                return;
        }

        err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
        if (err) {
                dev_err(enic, "del mac addr failed\n");
                return;
        }

        ether_addr_copy((struct ether_addr *)mac_addr,
                (struct ether_addr *)enic->mac_addr);

        err = vnic_dev_add_addr(enic->vdev, mac_addr);
        if (err) {
                dev_err(enic, "add mac addr failed\n");
                return;
        }
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
        if (*mbuf == NULL)
                return;

        rte_pktmbuf_free(*mbuf);
        *mbuf = NULL;
}

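/*
 * Program the RQ, WQ, CQ and interrupt resources that were allocated for
 * this vNIC. Each Rx queue pair (SOP RQ plus optional data RQ) shares one
 * completion queue; Tx completions are reported through a CQ message
 * written to the per-WQ cqmsg_rz memzone rather than through CQ entries.
 */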
void enic_init_vnic_resources(struct enic *enic)
{
        unsigned int error_interrupt_enable = 1;
        unsigned int error_interrupt_offset = 0;
        unsigned int index = 0;
        unsigned int cq_idx;
        struct vnic_rq *data_rq;

        for (index = 0; index < enic->rq_count; index++) {
                cq_idx = enic_cq_rq(enic, enic_sop_rq(index));

                vnic_rq_init(&enic->rq[enic_sop_rq(index)],
                        cq_idx,
                        error_interrupt_enable,
                        error_interrupt_offset);

                data_rq = &enic->rq[enic_data_rq(index)];
                if (data_rq->in_use)
                        vnic_rq_init(data_rq,
                                cq_idx,
                                error_interrupt_enable,
                                error_interrupt_offset);

                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        1 /* cq_entry_enable */,
                        0 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        0 /* cq_message_addr */);
        }

        for (index = 0; index < enic->wq_count; index++) {
                vnic_wq_init(&enic->wq[index],
                        enic_cq_wq(enic, index),
                        error_interrupt_enable,
                        error_interrupt_offset);

                cq_idx = enic_cq_wq(enic, index);
                vnic_cq_init(&enic->cq[cq_idx],
                        0 /* flow_control_enable */,
                        1 /* color_enable */,
                        0 /* cq_head */,
                        0 /* cq_tail */,
                        1 /* cq_tail_color */,
                        0 /* interrupt_enable */,
                        0 /* cq_entry_enable */,
                        1 /* cq_message_enable */,
                        0 /* interrupt offset */,
                        (u64)enic->wq[index].cqmsg_rz->phys_addr);
        }

        vnic_intr_init(&enic->intr,
                enic->config.intr_timer_usec,
                enic->config.intr_timer_type,
                /*mask_on_assertion*/1);
}

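/*
 * Fill an RQ with freshly allocated mbufs. Each descriptor is encoded with
 * the mbuf's DMA address and usable length (marked SOP or not-SOP depending
 * on the queue), then the posted_index doorbell tells the VIC how many
 * descriptors are ready to receive into.
 */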
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
        struct rte_mbuf *mb;
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned int i;
        dma_addr_t dma_addr;

        dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
                  rq->ring.desc_count);

        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_mbuf_raw_alloc(rq->mp);
                if (mb == NULL) {
                        dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rq->index);
                        return -ENOMEM;
                }

                mb->data_off = RTE_PKTMBUF_HEADROOM;
                dma_addr = (dma_addr_t)(mb->buf_physaddr
                           + RTE_PKTMBUF_HEADROOM);
                rq_enet_desc_enc(rqd, dma_addr,
                        (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
                        : RQ_ENET_TYPE_NOT_SOP),
                        mb->buf_len - RTE_PKTMBUF_HEADROOM);
                rq->mbuf_ring[i] = mb;
        }

        /* make sure all prior writes are complete before doing the PIO write */
        rte_rmb();

        /* Post all but the last buffer to VIC. */
        rq->posted_index = rq->ring.desc_count - 1;

        dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
                enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
        iowrite32(0, &rq->ctrl->fetch_index);

        return 0;
}

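/*
 * DMA-coherent allocation callbacks handed to the common vNIC code (see
 * vnic_register_cbacks() in enic_probe()). Allocations are backed by
 * rte_memzone and tracked on enic->memzone_list so the matching free can
 * locate the memzone by its virtual and bus address.
 */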
void *
enic_alloc_consistent(void *priv, size_t size,
        dma_addr_t *dma_handle, u8 *name)
{
        void *vaddr;
        const struct rte_memzone *rz;
        struct enic *enic = (struct enic *)priv;
        struct enic_memzone_entry *mze;

        *dma_handle = 0;

        rz = rte_memzone_reserve_aligned((const char *)name,
                size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
        if (!rz) {
                pr_err("%s : Failed to allocate memory requested for %s\n",
                        __func__, name);
                return NULL;
        }

        vaddr = rz->addr;
        *dma_handle = (dma_addr_t)rz->phys_addr;

        mze = rte_malloc("enic memzone entry",
                sizeof(struct enic_memzone_entry), 0);
        if (!mze) {
                pr_err("%s : Failed to allocate memory for memzone list\n",
                        __func__);
                rte_memzone_free(rz);
                return NULL;
        }

        mze->rz = rz;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);

        return vaddr;
}

void
enic_free_consistent(void *priv,
                     __rte_unused size_t size,
                     void *vaddr,
                     dma_addr_t dma_handle)
{
        struct enic_memzone_entry *mze;
        struct enic *enic = (struct enic *)priv;

        rte_spinlock_lock(&enic->memzone_list_lock);
        LIST_FOREACH(mze, &enic->memzone_list, entries) {
                if (mze->rz->addr == vaddr &&
                    mze->rz->phys_addr == dma_handle)
                        break;
        }
        if (mze == NULL) {
                rte_spinlock_unlock(&enic->memzone_list_lock);
                dev_warning(enic,
                        "Tried to free memory, but couldn't find it in the memzone list\n");
                return;
        }
        LIST_REMOVE(mze, entries);
        rte_spinlock_unlock(&enic->memzone_list_lock);
        rte_memzone_free(mze->rz);
        rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;
        int link_status;

        link_status = enic_get_link_status(enic);
        ret = (link_status == enic->link_status);
        enic->link_status = link_status;
        eth_dev->data->dev_link.link_status = link_status;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);

        return ret;
}

static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
        void *arg)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
        struct enic *enic = pmd_priv(dev);

        vnic_intr_return_all_credits(&enic->intr);

        enic_link_update(enic);
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        enic_log_q_error(enic);
}

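/*
 * Device start: populate every RQ with mbufs, start the WQs and RQs, add
 * the MAC address, enable the vNIC, and finally register and unmask the
 * error/link-status interrupt.
 */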
int enic_enable(struct enic *enic)
{
        unsigned int index;
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* vnic notification of link status has already been turned on in
         * enic_dev_init() which is called during probe time. Here we are
         * just turning on interrupt vector 0 if needed.
         */
        if (eth_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, 0);

        if (enic_clsf_init(enic))
                dev_warning(enic, "Init of hash table for clsf failed. "
                        "Flow director feature will not work\n");

        for (index = 0; index < enic->rq_count; index++) {
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_sop_rq(index)]);
                if (err) {
                        dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
                        return err;
                }
                err = enic_alloc_rx_queue_mbufs(enic,
                        &enic->rq[enic_data_rq(index)]);
                if (err) {
                        /* release the allocated mbufs for the sop rq */
                        enic_rxmbuf_queue_release(enic,
                                &enic->rq[enic_sop_rq(index)]);
                        dev_err(enic, "Failed to alloc data RX queue mbufs\n");
                        return err;
                }
        }

        for (index = 0; index < enic->wq_count; index++)
                enic_start_wq(enic, index);
        for (index = 0; index < enic->rq_count; index++)
                enic_start_rq(enic, index);

        vnic_dev_add_addr(enic->vdev, enic->mac_addr);

        vnic_dev_enable_wait(enic->vdev);

        /* Register and enable error interrupt */
        rte_intr_callback_register(&(enic->pdev->intr_handle),
                enic_intr_handler, (void *)enic->rte_dev);

        rte_intr_enable(&(enic->pdev->intr_handle));
        vnic_intr_unmask(&enic->intr);

        return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
        int err;

        dev_info(enic, "vNIC resources used:  "
                "wq %d rq %d cq %d intr %d\n",
                enic->wq_count, enic_vnic_rq_count(enic),
                enic->cq_count, enic->intr_count);

        err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
        if (err)
                enic_free_vnic_resources(enic);

        return err;
}

void enic_free_rq(void *rxq)
{
        struct vnic_rq *rq_sop, *rq_data;
        struct enic *enic;

        if (rxq == NULL)
                return;

        rq_sop = (struct vnic_rq *)rxq;
        enic = vnic_dev_priv(rq_sop->vdev);
        rq_data = &enic->rq[rq_sop->data_queue_idx];

        enic_rxmbuf_queue_release(enic, rq_sop);
        if (rq_data->in_use)
                enic_rxmbuf_queue_release(enic, rq_data);

        rte_free(rq_sop->mbuf_ring);
        if (rq_data->in_use)
                rte_free(rq_data->mbuf_ring);

        rq_sop->mbuf_ring = NULL;
        rq_data->mbuf_ring = NULL;

        vnic_rq_free(rq_sop);
        if (rq_data->in_use)
                vnic_rq_free(rq_data);

        vnic_cq_free(&enic->cq[rq_sop->index]);
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        vnic_wq_enable(&enic->wq[queue_idx]);
        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int ret;

        ret = vnic_wq_disable(&enic->wq[queue_idx]);
        if (ret)
                return ret;

        eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
        struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
        struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        if (rq_data->in_use)
                vnic_rq_enable(rq_data);
        rte_mb();
        vnic_rq_enable(rq_sop);
        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
        int ret1 = 0, ret2 = 0;
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
        struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];

        ret2 = vnic_rq_disable(rq_sop);
        rte_mb();
        if (rq_data->in_use)
                ret1 = vnic_rq_disable(rq_data);

        if (ret2)
                return ret2;
        else if (ret1)
                return ret1;

        eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        return 0;
}

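/*
 * Allocate one ethdev Rx queue. Internally this is a start-of-packet (SOP)
 * RQ plus a data RQ that is only used when scatter Rx needs more than one
 * mbuf per packet; both feed a single CQ. The requested descriptor count is
 * split between the two RQs and rounded down to a multiple of 32.
 */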
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, struct rte_mempool *mp,
        uint16_t nb_desc)
{
        int rc;
        uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
        uint16_t data_queue_idx = enic_data_rq(queue_idx);
        struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
        struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;

        rq_sop->is_sop = 1;
        rq_sop->data_queue_idx = data_queue_idx;
        rq_data->is_sop = 0;
        rq_data->data_queue_idx = 0;
        rq_sop->socket_id = socket_id;
        rq_sop->mp = mp;
        rq_data->socket_id = socket_id;
        rq_data->mp = mp;
        rq_sop->in_use = 1;

        mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                               RTE_PKTMBUF_HEADROOM);

        if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
                dev_info(enic, "Scatter rx mode enabled\n");
                /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
                mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
                                 (mbuf_size - 1)) / mbuf_size;
        } else {
                dev_info(enic, "Scatter rx mode disabled\n");
                mbufs_per_pkt = 1;
        }

        if (mbufs_per_pkt > 1) {
                dev_info(enic, "Scatter rx mode in use\n");
                rq_data->in_use = 1;
        } else {
                dev_info(enic, "Scatter rx mode not being used\n");
                rq_data->in_use = 0;
        }

        /* number of descriptors have to be a multiple of 32 */
        nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
        nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;

        rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
        rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

        if (mbufs_per_pkt > 1) {
                min_sop = 64;
                max_sop = ((enic->config.rq_desc_count /
                            (mbufs_per_pkt - 1)) & ~0x1F);
                min_data = min_sop * (mbufs_per_pkt - 1);
                max_data = enic->config.rq_desc_count;
        } else {
                min_sop = 64;
                max_sop = enic->config.rq_desc_count;
                min_data = 0;
                max_data = 0;
        }

        if (nb_desc < (min_sop + min_data)) {
                dev_info(enic,
                        "Number of rx descs too low, adjusting to minimum\n");
                nb_sop_desc = min_sop;
                nb_data_desc = min_data;
        } else if (nb_desc > (max_sop + max_data)) {
                dev_info(enic,
                        "Number of rx_descs too high, adjusting to maximum\n");
                nb_sop_desc = max_sop;
                nb_data_desc = max_data;
        }
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
                        enic->config.mtu, mbuf_size, min_sop + min_data,
                        max_sop + max_data);
        }
        dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
                nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

        /* Allocate sop queue resources */
        rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
                nb_sop_desc, sizeof(struct rq_enet_desc));
        if (rc) {
                dev_err(enic, "error in allocation of sop rq\n");
                goto err_exit;
        }
        nb_sop_desc = rq_sop->ring.desc_count;

        if (rq_data->in_use) {
                /* Allocate data queue resources */
                rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
                        nb_data_desc,
                        sizeof(struct rq_enet_desc));
                if (rc) {
                        dev_err(enic, "error in allocation of data rq\n");
                        goto err_free_rq_sop;
                }
                nb_data_desc = rq_data->ring.desc_count;
        }

        rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
                socket_id, nb_sop_desc + nb_data_desc,
                sizeof(struct cq_enet_rq_desc));
        if (rc) {
                dev_err(enic, "error in allocation of cq for rq\n");
                goto err_free_rq_data;
        }

        /* Allocate the mbuf rings */
        rq_sop->mbuf_ring = (struct rte_mbuf **)
                rte_zmalloc_socket("rq->mbuf_ring",
                        sizeof(struct rte_mbuf *) * nb_sop_desc,
                        RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
        if (rq_sop->mbuf_ring == NULL)
                goto err_free_cq;

        if (rq_data->in_use) {
                rq_data->mbuf_ring = (struct rte_mbuf **)
                        rte_zmalloc_socket("rq->mbuf_ring",
                                sizeof(struct rte_mbuf *) * nb_data_desc,
                                RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
                if (rq_data->mbuf_ring == NULL)
                        goto err_free_sop_mbuf;
        }

        return 0;

err_free_sop_mbuf:
        rte_free(rq_sop->mbuf_ring);
err_free_cq:
        /* cleanup on error */
        vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
        if (rq_data->in_use)
                vnic_rq_free(rq_data);
err_free_rq_sop:
        vnic_rq_free(rq_sop);
err_exit:
        return -ENOMEM;
}

void enic_free_wq(void *txq)
{
        struct vnic_wq *wq;
        struct enic *enic;

        if (txq == NULL)
                return;

        wq = (struct vnic_wq *)txq;
        enic = vnic_dev_priv(wq->vdev);
        rte_memzone_free(wq->cqmsg_rz);
        vnic_wq_free(wq);
        vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
        unsigned int socket_id, uint16_t nb_desc)
{
        int err;
        struct vnic_wq *wq = &enic->wq[queue_idx];
        unsigned int cq_index = enic_cq_wq(enic, queue_idx);
        char name[NAME_MAX];
        static int instance;

        wq->socket_id = socket_id;
        if (nb_desc > enic->config.wq_desc_count) {
                dev_warning(enic,
                        "WQ %d - number of tx desc in cmd line (%d) "
                        "is greater than that in the UCSM/CIMC adapter "
                        "policy. Applying the value in the adapter "
                        "policy (%d)\n",
                        queue_idx, nb_desc, enic->config.wq_desc_count);
        } else if (nb_desc != enic->config.wq_desc_count) {
                enic->config.wq_desc_count = nb_desc;
                dev_info(enic,
                        "TX Queues - effective number of descs:%d\n",
                        nb_desc);
        }

        /* Allocate queue resources */
        err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
                enic->config.wq_desc_count,
                sizeof(struct wq_enet_desc));
        if (err) {
                dev_err(enic, "error in allocation of wq\n");
                return err;
        }

        err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
                socket_id, enic->config.wq_desc_count,
                sizeof(struct cq_enet_wq_desc));
        if (err) {
                vnic_wq_free(wq);
                dev_err(enic, "error in allocation of cq for wq\n");
        }

        /* setup up CQ message */
        snprintf((char *)name, sizeof(name),
                "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
                instance++);
        wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
                sizeof(uint32_t),
                SOCKET_ID_ANY, 0, ENIC_ALIGN);
        if (!wq->cqmsg_rz)
                return -ENOMEM;

        return err;
}

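/*
 * Device stop: mask and unregister the error interrupt, disable the vNIC,
 * drop the unicast MAC (PF only), disable every WQ/RQ, and clean the rings
 * so all in-flight mbufs are returned to their pools.
 */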
int enic_disable(struct enic *enic)
{
        unsigned int i;
        int err;

        vnic_intr_mask(&enic->intr);
        (void)vnic_intr_masked(&enic->intr); /* flush write */
        rte_intr_disable(&enic->pdev->intr_handle);
        rte_intr_callback_unregister(&enic->pdev->intr_handle,
                enic_intr_handler,
                (void *)enic->rte_dev);

        vnic_dev_disable(enic->vdev);

        enic_clsf_destroy(enic);

        if (!enic_is_sriov_vf(enic))
                vnic_dev_del_addr(enic->vdev, enic->mac_addr);

        for (i = 0; i < enic->wq_count; i++) {
                err = vnic_wq_disable(&enic->wq[i]);
                if (err)
                        return err;
        }
        for (i = 0; i < enic_vnic_rq_count(enic); i++) {
                if (enic->rq[i].in_use) {
                        err = vnic_rq_disable(&enic->rq[i]);
                        if (err)
                                return err;
                }
        }

        /* If we were using interrupts, set the interrupt vector to -1
         * to disable interrupts. We are not disabling link notifications,
         * though, as we want the polling of link status to continue working.
         */
        if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
                vnic_dev_notify_set(enic->vdev, -1);

        vnic_dev_set_reset_flag(enic->vdev, 1);

        for (i = 0; i < enic->wq_count; i++)
                vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

        for (i = 0; i < enic_vnic_rq_count(enic); i++)
                if (enic->rq[i].in_use)
                        vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        for (i = 0; i < enic->cq_count; i++)
                vnic_cq_clean(&enic->cq[i]);
        vnic_intr_clean(&enic->intr);

        return 0;
}

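/*
 * Issue a vNIC devcmd via start() and poll finished() until it reports
 * completion, giving up after roughly two seconds.
 */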
static int enic_dev_wait(struct vnic_dev *vdev,
        int (*start)(struct vnic_dev *, int),
        int (*finished)(struct vnic_dev *, int *),
        int arg)
{
        int done;
        int err;
        int i;

        err = start(vdev, arg);
        if (err)
                return err;

        /* Wait for func to complete...2 seconds max */
        for (i = 0; i < 2000; i++) {
                err = finished(vdev, &done);
                if (err)
                        return err;
                if (done)
                        return 0;
                usleep(1000);
        }
        return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
        int err;

        err = enic_dev_wait(enic->vdev, vnic_dev_open,
                vnic_dev_open_done, 0);
        if (err)
                dev_err(enic_get_dev(enic),
                        "vNIC device open failed, err %d\n", err);

        return err;
}

static int enic_set_rsskey(struct enic *enic)
{
        dma_addr_t rss_key_buf_pa;
        union vnic_rss_key *rss_key_buf_va = NULL;
        static union vnic_rss_key rss_key = {
                .key = {
                        [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
                        [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
                        [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
                        [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
                }
        };
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
        rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
                &rss_key_buf_pa, name);
        if (!rss_key_buf_va)
                return -ENOMEM;

        rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

        err = enic_set_rss_key(enic,
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));

        enic_free_consistent(enic, sizeof(union vnic_rss_key),
                rss_key_buf_va, rss_key_buf_pa);

        return err;
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
        dma_addr_t rss_cpu_buf_pa;
        union vnic_rss_cpu *rss_cpu_buf_va = NULL;
        int i;
        int err;
        u8 name[NAME_MAX];

        snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
        rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
                &rss_cpu_buf_pa, name);
        if (!rss_cpu_buf_va)
                return -ENOMEM;

        for (i = 0; i < (1 << rss_hash_bits); i++)
                (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
                        enic_sop_rq(i % enic->rq_count);

        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));

        enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);

        return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
        u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
        const u8 tso_ipid_split_en = 0;
        int err;

        /* Enable VLAN tag stripping */
        err = enic_set_nic_cfg(enic,
                rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu,
                rss_enable, tso_ipid_split_en,
                enic->ig_vlan_strip_en);

        return err;
}

int enic_set_rss_nic_cfg(struct enic *enic)
{
        const u8 rss_default_cpu = 0;
        const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
                NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
                NIC_CFG_RSS_HASH_TYPE_IPV6 |
                NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
        const u8 rss_hash_bits = 7;
        const u8 rss_base_cpu = 0;
        u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

        if (rss_enable) {
                if (!enic_set_rsskey(enic)) {
                        if (enic_set_rsscpu(enic, rss_hash_bits)) {
                                rss_enable = 0;
                                dev_warning(enic, "RSS disabled, "
                                        "Failed to set RSS cpu indirection table.");
                        }
                } else {
                        rss_enable = 0;
                        dev_warning(enic,
                                "RSS disabled, Failed to set RSS key.\n");
                }
        }

        return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu, rss_enable);
}

int enic_setup_finish(struct enic *enic)
{
        int ret;

        enic_init_soft_stats(enic);

        ret = enic_set_rss_nic_cfg(enic);
        if (ret) {
                dev_err(enic, "Failed to config nic, aborting.\n");
                return ret;
        }

        /* Default conf */
        vnic_dev_packet_filter(enic->vdev,
                1 /* directed  */,
                1 /* multicast */,
                1 /* broadcast */,
                0 /* promisc   */,
                1 /* allmulti  */);

        enic->promisc = 0;
        enic->allmulti = 1;

        return 0;
}

void enic_add_packet_filter(struct enic *enic)
{
        /* Args -> directed, multicast, broadcast, promisc, allmulti */
        vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
                enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
        return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        /* stop link status checking */
        vnic_dev_notify_unset(enic->vdev);

        rte_free(eth_dev->data->mac_addrs);
}

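/*
 * Check that the WQ/RQ/CQ counts reported by firmware can satisfy the queue
 * counts the application configured; with scatter Rx every configured Rx
 * queue consumes two RQs on the VIC.
 */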
int enic_set_vnic_res(struct enic *enic)
{
        struct rte_eth_dev *eth_dev = enic->rte_dev;
        int rc = 0;

        /* With Rx scatter support, two RQs are now used per RQ used by
         * the application.
         */
        if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
                dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
                        eth_dev->data->nb_rx_queues,
                        eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
                rc = -EINVAL;
        }
        if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
                dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
                        eth_dev->data->nb_tx_queues, enic->conf_wq_count);
                rc = -EINVAL;
        }

        if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
                                   eth_dev->data->nb_tx_queues)) {
                dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
                        (eth_dev->data->nb_rx_queues +
                         eth_dev->data->nb_tx_queues), enic->conf_cq_count);
                rc = -EINVAL;
        }

        if (rc == 0) {
                enic->rq_count = eth_dev->data->nb_rx_queues;
                enic->wq_count = eth_dev->data->nb_tx_queues;
                enic->cq_count = enic->rq_count + enic->wq_count;
        }

        return rc;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
        uint16_t old_mtu;       /* previous setting */
        uint16_t config_mtu;    /* Value configured into NIC via CIMC/UCSM */
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        old_mtu = eth_dev->data->mtu;
        config_mtu = enic->config.mtu;

        /* only works with Rx scatter disabled */
        if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
                return -ENOTSUP;

        if (new_mtu > enic->max_mtu) {
                dev_err(enic,
                        "MTU not updated: requested (%u) greater than max (%u)\n",
                        new_mtu, enic->max_mtu);
                return -EINVAL;
        }
        if (new_mtu < ENIC_MIN_MTU) {
                dev_info(enic,
                        "MTU not updated: requested (%u) less than min (%u)\n",
                        new_mtu, ENIC_MIN_MTU);
                return -EINVAL;
        }
        if (new_mtu > config_mtu)
                dev_warning(enic,
                        "MTU (%u) is greater than value configured in NIC (%u)\n",
                        new_mtu, config_mtu);

        /* update the mtu */
        eth_dev->data->mtu = new_mtu;

        dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
        return 0;
}

static int enic_dev_init(struct enic *enic)
{
        int err;
        struct rte_eth_dev *eth_dev = enic->rte_dev;

        vnic_dev_intr_coal_timer_info_default(enic->vdev);

        /* Get vNIC configuration
         */
        err = enic_get_vnic_config(enic);
        if (err) {
                dev_err(dev, "Get vNIC configuration failed, aborting\n");
                return err;
        }

        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
        ether_addr_copy((struct ether_addr *)enic->mac_addr,
                &eth_dev->data->mac_addrs[0]);

        /* Get available resource counts
         */
        enic_get_res_counts(enic);

        vnic_dev_set_reset_flag(enic->vdev, 0);

        /* set up link status checking */
        vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

        return 0;
}

int enic_probe(struct enic *enic)
{
        struct rte_pci_device *pdev = enic->pdev;
        int err = -1;

        dev_debug(enic, " Initializing ENIC PMD\n");

        enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
        enic->bar0.len = pdev->mem_resource[0].len;

        /* Register vNIC device */
        enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
        if (!enic->vdev) {
                dev_err(enic, "vNIC registration failed, aborting\n");
                goto err_out;
        }

        LIST_INIT(&enic->memzone_list);
        rte_spinlock_init(&enic->memzone_list_lock);

        vnic_register_cbacks(enic->vdev,
                enic_alloc_consistent,
                enic_free_consistent);

        /* Issue device open to get device in known state */
        err = enic_dev_open(enic);
        if (err) {
                dev_err(enic, "vNIC dev open failed, aborting\n");
                goto err_out_unregister;
        }

        /* Set ingress vlan rewrite mode before vnic initialization */
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
                IG_VLAN_REWRITE_MODE_PASS_THRU);
        if (err) {
                dev_err(enic,
                        "Failed to set ingress vlan rewrite mode, aborting.\n");
                goto err_out_dev_close;
        }

        /* Issue device init to initialize the vnic-to-switch link.
         * We'll start with carrier off and wait for link UP
         * notification later to turn on carrier. We don't need
         * to wait here for the vnic-to-switch link initialization
         * to complete; link UP notification is the indication that
         * the process is complete.
         */
        err = vnic_dev_init(enic->vdev, 0);
        if (err) {
                dev_err(enic, "vNIC dev init failed, aborting\n");
                goto err_out_dev_close;
        }

        err = enic_dev_init(enic);
        if (err) {
                dev_err(enic, "Device initialization failed, aborting\n");
                goto err_out_dev_close;
        }

        return 0;

err_out_dev_close:
        vnic_dev_close(enic->vdev);
err_out_unregister:
        vnic_dev_unregister(enic->vdev);
err_out:
        return err;
}

void enic_remove(struct enic *enic)
{
        enic_dev_deinit(enic);
        vnic_dev_close(enic->vdev);
        vnic_dev_unregister(enic->vdev);
}