/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

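/*
 * Helper predicates: identify SR-IOV VFs by PCI device ID, and sanity-check
 * MAC addresses before they are programmed into the vNIC.
 */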
static inline int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

static int is_zero_addr(uint8_t *addr)
{
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

static int is_mcast_addr(uint8_t *addr)
{
	return addr[0] & 1;
}

static int is_eth_addr_valid(uint8_t *addr)
{
	return !is_mcast_addr(addr) && !is_zero_addr(addr);
}

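/* Free any mbufs still held in the RQ's software ring, e.g. at queue teardown. */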
static void
enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
{
	uint16_t i;

	if (!rq || !rq->mbuf_ring) {
		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
		return;
	}

	for (i = 0; i < rq->ring.desc_count; i++) {
		if (rq->mbuf_ring[i]) {
			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
			rq->mbuf_ring[i] = NULL;
		}
	}
}

static void enic_free_wq_buf(struct vnic_wq_buf *buf)
{
	struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;

	rte_pktmbuf_free_seg(mbuf);
	buf->mb = NULL;
}

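/* Log the error status, if any, of every WQ and every in-use RQ. */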
static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			dev_err(enic, "WQ[%d] error_status %d\n", i,
				error_status);
	}

	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (!enic->rq[i].in_use)
			continue;
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			dev_err(enic, "RQ[%d] error_status %d\n", i,
				error_status);
	}
}

static void enic_clear_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_clear(&soft_stats->rx_nombuf);
	rte_atomic64_clear(&soft_stats->rx_packet_errors);
	rte_atomic64_clear(&soft_stats->tx_oversized);
}

static void enic_init_soft_stats(struct enic *enic)
{
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	rte_atomic64_init(&soft_stats->rx_nombuf);
	rte_atomic64_init(&soft_stats->rx_packet_errors);
	rte_atomic64_init(&soft_stats->tx_oversized);
	enic_clear_soft_stats(enic);
}

void enic_dev_stats_clear(struct enic *enic)
{
	if (vnic_dev_stats_clear(enic->vdev))
		dev_err(enic, "Error in clearing stats\n");
	enic_clear_soft_stats(enic);
}

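/*
 * Fill in rte_eth_stats by combining the firmware-maintained vNIC counters
 * with the soft stats (rx_nombuf, rx_packet_errors, tx_oversized) that the
 * PMD maintains in the datapath.
 */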
int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
	struct vnic_stats *stats;
	struct enic_soft_stats *soft_stats = &enic->soft_stats;
	int64_t rx_truncated;
	uint64_t rx_packet_errors;
	int ret = vnic_dev_stats_dump(enic->vdev, &stats);

	if (ret) {
		dev_err(enic, "Error in getting stats\n");
		return ret;
	}

	/* The number of truncated packets can only be calculated by
	 * subtracting a hardware counter from error packets received by
	 * the driver. Note: this causes transient inaccuracies in the
	 * ipackets count. Also, the lengths of truncated packets are
	 * counted in ibytes even though truncated packets are dropped,
	 * which can make ibytes slightly higher than it should be.
	 */
	rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
	rx_truncated = rx_packet_errors - stats->rx.rx_errors;

	r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
	r_stats->opackets = stats->tx.tx_frames_ok;

	r_stats->ibytes = stats->rx.rx_bytes_ok;
	r_stats->obytes = stats->tx.tx_bytes_ok;

	r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
	r_stats->oerrors = stats->tx.tx_errors
			   + rte_atomic64_read(&soft_stats->tx_oversized);

	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;

	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
	return 0;
}

int enic_del_mac_address(struct enic *enic, int mac_index)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;

	return vnic_dev_del_addr(enic->vdev, mac_addr);
}

int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
	int err;

	if (!is_eth_addr_valid(mac_addr)) {
		dev_err(enic, "invalid mac address\n");
		return -EINVAL;
	}

	err = vnic_dev_add_addr(enic->vdev, mac_addr);
	if (err)
		dev_err(enic, "add mac addr failed\n");
	return err;
}

static void
enic_free_rq_buf(struct rte_mbuf **mbuf)
{
	if (*mbuf == NULL)
		return;

	rte_pktmbuf_free(*mbuf);
	*mbuf = NULL;
}

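/*
 * (Re)initialize all allocated RQs, WQs, CQs, and interrupt controls on the
 * vNIC. Each Rx queue pairs a start-of-packet (SOP) RQ with an optional data
 * RQ, both feeding a single CQ. Each WQ gets its own CQ in message mode:
 * rather than posting CQ entries, the NIC writes the completion index to the
 * cqmsg_rz memzone.
 */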
void enic_init_vnic_resources(struct enic *enic)
{
	unsigned int error_interrupt_enable = 1;
	unsigned int error_interrupt_offset = 0;
	unsigned int rxq_interrupt_enable = 0;
	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
	unsigned int index = 0;
	unsigned int cq_idx;
	struct vnic_rq *data_rq;

	if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
		rxq_interrupt_enable = 1;

	for (index = 0; index < enic->rq_count; index++) {
		cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));

		vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
			     cq_idx,
			     error_interrupt_enable,
			     error_interrupt_offset);

		data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
		if (data_rq->in_use)
			vnic_rq_init(data_rq,
				     cq_idx,
				     error_interrupt_enable,
				     error_interrupt_offset);

		vnic_cq_init(&enic->cq[cq_idx],
			     0 /* flow_control_enable */,
			     1 /* color_enable */,
			     0 /* cq_head */,
			     0 /* cq_tail */,
			     1 /* cq_tail_color */,
			     rxq_interrupt_enable,
			     1 /* cq_entry_enable */,
			     0 /* cq_message_enable */,
			     rxq_interrupt_offset,
			     0 /* cq_message_addr */);
		if (rxq_interrupt_enable)
			rxq_interrupt_offset++;
	}

	for (index = 0; index < enic->wq_count; index++) {
		vnic_wq_init(&enic->wq[index],
			     enic_cq_wq(enic, index),
			     error_interrupt_enable,
			     error_interrupt_offset);
		/* Compute unsupported ol flags for enic_prep_pkts() */
		enic->wq[index].tx_offload_notsup_mask =
			PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;

		cq_idx = enic_cq_wq(enic, index);
		vnic_cq_init(&enic->cq[cq_idx],
			     0 /* flow_control_enable */,
			     1 /* color_enable */,
			     0 /* cq_head */,
			     0 /* cq_tail */,
			     1 /* cq_tail_color */,
			     0 /* interrupt_enable */,
			     0 /* cq_entry_enable */,
			     1 /* cq_message_enable */,
			     0 /* interrupt offset */,
			     (u64)enic->wq[index].cqmsg_rz->iova);
	}

	for (index = 0; index < enic->intr_count; index++) {
		vnic_intr_init(&enic->intr[index],
			       enic->config.intr_timer_usec,
			       enic->config.intr_timer_type,
			       /*mask_on_assertion*/1);
	}
}

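/*
 * Allocate an mbuf for every descriptor in the RQ ring and post the buffers
 * to the NIC by writing the posted index. Called before the RQ is enabled.
 */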
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned int i;
	dma_addr_t dma_addr;
	uint32_t max_rx_pkt_len;
	uint16_t rq_buf_len;

	if (!rq->in_use)
		return 0;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	/*
	 * If *not* using scatter and the mbuf size is smaller than the
	 * requested max packet size (max_rx_pkt_len), then reduce the
	 * posted buffer size to max_rx_pkt_len. HW still receives packets
	 * larger than max_rx_pkt_len, but they will be truncated, which we
	 * drop in the rx handler. Not ideal, but better than returning
	 * large packets when the user is not expecting them.
	 */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
	if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
		rq_buf_len = max_rx_pkt_len;
	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_mbuf_raw_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned int)rq->index);
			return -ENOMEM;
		}

		mb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(mb->buf_iova
			   + RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd, dma_addr,
				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				 : RQ_ENET_TYPE_NOT_SOP),
				 rq_buf_len);
		rq->mbuf_ring[i] = mb;
	}

	/* make sure all prior writes are complete before doing the PIO write */
	rte_rmb();

	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		  enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	iowrite32(0, &rq->ctrl->fetch_index);
	rte_rmb();

	return 0;
}

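/*
 * DMA-coherent allocator registered with the vNIC library (see
 * vnic_register_cbacks() in enic_probe()). Each allocation is backed by an
 * IOVA-contiguous memzone and is tracked on memzone_list so that
 * enic_free_consistent() can find it again by vaddr/dma_handle.
 */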
void *
enic_alloc_consistent(void *priv, size_t size,
	dma_addr_t *dma_handle, u8 *name)
{
	void *vaddr;
	const struct rte_memzone *rz;
	*dma_handle = 0;
	struct enic *enic = (struct enic *)priv;
	struct enic_memzone_entry *mze;

	rz = rte_memzone_reserve_aligned((const char *)name, size,
			SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!rz) {
		pr_err("%s : Failed to allocate memory requested for %s\n",
			__func__, name);
		return NULL;
	}

	vaddr = rz->addr;
	*dma_handle = (dma_addr_t)rz->iova;

	mze = rte_malloc("enic memzone entry",
			 sizeof(struct enic_memzone_entry), 0);

	if (!mze) {
		pr_err("%s : Failed to allocate memory for memzone list\n",
		       __func__);
		rte_memzone_free(rz);
		return NULL;
	}

	mze->rz = rz;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);

	return vaddr;
}

void
enic_free_consistent(void *priv,
		     __rte_unused size_t size,
		     void *vaddr,
		     dma_addr_t dma_handle)
{
	struct enic_memzone_entry *mze;
	struct enic *enic = (struct enic *)priv;

	rte_spinlock_lock(&enic->memzone_list_lock);
	LIST_FOREACH(mze, &enic->memzone_list, entries) {
		if (mze->rz->addr == vaddr &&
		    mze->rz->iova == dma_handle)
			break;
	}
	if (mze == NULL) {
		rte_spinlock_unlock(&enic->memzone_list_lock);
		dev_warning(enic,
			"Tried to free memory, but couldn't find it in the memzone list\n");
		return;
	}
	LIST_REMOVE(mze, entries);
	rte_spinlock_unlock(&enic->memzone_list_lock);
	rte_memzone_free(mze->rz);
	rte_free(mze);
}

int enic_link_update(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = enic_get_link_status(enic);
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static void
enic_intr_handler(void *arg)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
	struct enic *enic = pmd_priv(dev);

	vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	enic_link_update(enic);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	enic_log_q_error(enic);
}

static int enic_rxq_intr_init(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;
	uint32_t rxq_intr_count, i;
	int err;

	intr_handle = enic->rte_dev->intr_handle;
	if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
		return 0;
	/*
	 * Rx queue interrupts only work when we have MSI-X interrupts,
	 * one per queue. Sharing one interrupt is technically
	 * possible with VIC, but it is not worth the complications it brings.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
			" (vfio-pci driver)\n");
		return -ENOTSUP;
	}
	rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
	err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
	if (err) {
		dev_err(enic, "Failed to enable event fds for Rx queue"
			" interrupts\n");
		return err;
	}
	intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
					    rxq_intr_count * sizeof(int), 0);
	if (intr_handle->intr_vec == NULL) {
		dev_err(enic, "Failed to allocate intr_vec\n");
		return -ENOMEM;
	}
	for (i = 0; i < rxq_intr_count; i++)
		intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
	return 0;
}

static void enic_rxq_intr_deinit(struct enic *enic)
{
	struct rte_intr_handle *intr_handle;

	intr_handle = enic->rte_dev->intr_handle;
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}

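/*
 * Bring the port up: post Rx buffers, start all queues, enable the vNIC,
 * and register/unmask the link-status and error interrupt.
 */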
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	/* vnic notification of link status has already been turned on in
	 * enic_dev_init() which is called during probe time. Here we are
	 * just turning on interrupt vector 0 if needed.
	 */
	if (eth_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, 0);

	err = enic_rxq_intr_init(enic);
	if (err)
		return err;
	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed. "
			"Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
		if (err) {
			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
			return err;
		}
		err = enic_alloc_rx_queue_mbufs(enic,
			&enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
		if (err) {
			/* release the allocated mbufs for the sop rq */
			enic_rxmbuf_queue_release(enic,
				&enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);

			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		enic_start_wq(enic, index);
	for (index = 0; index < enic->rq_count; index++)
		enic_start_rq(enic, index);

	vnic_dev_add_addr(enic->vdev, enic->mac_addr);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	/* Unmask LSC interrupt */
	vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);

	return 0;
}

int enic_alloc_intr_resources(struct enic *enic)
{
	int err;
	unsigned int i;

	dev_info(enic, "vNIC resources used:  "
		"wq %d rq %d cq %d intr %d\n",
		enic->wq_count, enic_vnic_rq_count(enic),
		enic->cq_count, enic->intr_count);

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err) {
			enic_free_vnic_resources(enic);
			return err;
		}
	}
	return 0;
}

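/* Free an Rx queue: the SOP RQ, its companion data RQ (if any), and their CQ. */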
void enic_free_rq(void *rxq)
{
	struct vnic_rq *rq_sop, *rq_data;
	struct enic *enic;

	if (rxq == NULL)
		return;

	rq_sop = (struct vnic_rq *)rxq;
	enic = vnic_dev_priv(rq_sop->vdev);
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	enic_rxmbuf_queue_release(enic, rq_sop);
	if (rq_data->in_use)
		enic_rxmbuf_queue_release(enic, rq_data);

	rte_free(rq_sop->mbuf_ring);
	if (rq_data->in_use)
		rte_free(rq_data->mbuf_ring);

	rq_sop->mbuf_ring = NULL;
	rq_data->mbuf_ring = NULL;

	vnic_rq_free(rq_sop);
	if (rq_data->in_use)
		vnic_rq_free(rq_data);

	vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);

	rq_sop->in_use = 0;
	rq_data->in_use = 0;
}

void enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	vnic_wq_enable(&enic->wq[queue_idx]);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return ret;

	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	if (rq_data->in_use)
		vnic_rq_enable(rq_data);
	rte_mb();
	vnic_rq_enable(rq_sop);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}

int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret1 = 0, ret2 = 0;
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	rq_data = &enic->rq[rq_sop->data_queue_idx];

	ret2 = vnic_rq_disable(rq_sop);
	rte_mb();
	if (rq_data->in_use)
		ret1 = vnic_rq_disable(rq_data);

	if (ret2)
		return ret2;
	else if (ret1)
		return ret1;

	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

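/*
 * Allocate an Rx queue as a SOP/data RQ pair. With Rx scatter, the first
 * fragment of a packet lands in the SOP RQ and any remaining fragments land
 * in the data RQ, so the nb_desc budget is split between the two rings.
 */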
int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, struct rte_mempool *mp,
	uint16_t nb_desc, uint16_t free_thresh)
{
	int rc;
	uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
	uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
	unsigned int mbuf_size, mbufs_per_pkt;
	unsigned int nb_sop_desc, nb_data_desc;
	uint16_t min_sop, max_sop, min_data, max_data;
	uint32_t max_rx_pkt_len;

	rq_sop->is_sop = 1;
	rq_sop->data_queue_idx = data_queue_idx;
	rq_data->is_sop = 0;
	rq_data->data_queue_idx = 0;
	rq_sop->socket_id = socket_id;
	rq_sop->mp = mp;
	rq_data->socket_id = socket_id;
	rq_data->mp = mp;
	rq_sop->in_use = 1;
	rq_sop->rx_free_thresh = free_thresh;
	rq_data->rx_free_thresh = free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
		  free_thresh);

	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
			       RTE_PKTMBUF_HEADROOM);
	/* max_rx_pkt_len includes the ethernet header and CRC. */
	max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (enic->rte_dev->data->dev_conf.rxmode.offloads &
	    DEV_RX_OFFLOAD_SCATTER) {
		dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
		/* ceil((max pkt len)/mbuf_size) */
		mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
	} else {
		dev_info(enic, "Scatter rx mode disabled\n");
		mbufs_per_pkt = 1;
		if (max_rx_pkt_len > mbuf_size) {
			dev_warning(enic, "The maximum Rx packet size (%u) is"
				    " larger than the mbuf size (%u), and"
				    " scatter is disabled. Larger packets will"
				    " be truncated.\n",
				    max_rx_pkt_len, mbuf_size);
		}
	}

	if (mbufs_per_pkt > 1) {
		dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
		rq_sop->data_queue_enable = 1;
		rq_data->in_use = 1;
		/*
		 * HW does not directly support rxmode.max_rx_pkt_len. HW always
		 * receives packet sizes up to the "max" MTU.
		 * If not using scatter, we can achieve the effect of dropping
		 * larger packets by reducing the size of posted buffers.
		 * See enic_alloc_rx_queue_mbufs().
		 */
		if (max_rx_pkt_len <
		    enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
			dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
				    " when scatter rx mode is in use.\n");
		}
	} else {
		dev_info(enic, "Rq %u Scatter rx mode not being used\n",
			 queue_idx);
		rq_sop->data_queue_enable = 0;
		rq_data->in_use = 0;
	}

	/* The number of descriptors has to be a multiple of 32 */
	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;

	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;

	if (mbufs_per_pkt > 1) {
		min_sop = 64;
		max_sop = ((enic->config.rq_desc_count /
			    (mbufs_per_pkt - 1)) & ~0x1F);
		min_data = min_sop * (mbufs_per_pkt - 1);
		max_data = enic->config.rq_desc_count;
	} else {
		min_sop = 64;
		max_sop = enic->config.rq_desc_count;
		min_data = 0;
		max_data = 0;
	}

	if (nb_desc < (min_sop + min_data)) {
		dev_warning(enic,
			    "Number of rx descs too low, adjusting to minimum\n");
		nb_sop_desc = min_sop;
		nb_data_desc = min_data;
	} else if (nb_desc > (max_sop + max_data)) {
		dev_warning(enic,
			    "Number of rx_descs too high, adjusting to maximum\n");
		nb_sop_desc = max_sop;
		nb_data_desc = max_data;
	}
	if (mbufs_per_pkt > 1) {
		dev_info(enic, "For max packet size %u and mbuf size %u valid"
			 " rx descriptor range is %u to %u\n",
			 max_rx_pkt_len, mbuf_size, min_sop + min_data,
			 max_sop + max_data);
	}
	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);

	/* Allocate sop queue resources */
	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
		nb_sop_desc, sizeof(struct rq_enet_desc));
	if (rc) {
		dev_err(enic, "error in allocation of sop rq\n");
		goto err_exit;
	}
	nb_sop_desc = rq_sop->ring.desc_count;

	if (rq_data->in_use) {
		/* Allocate data queue resources */
		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
				   nb_data_desc,
				   sizeof(struct rq_enet_desc));
		if (rc) {
			dev_err(enic, "error in allocation of data rq\n");
			goto err_free_rq_sop;
		}
		nb_data_desc = rq_data->ring.desc_count;
	}
	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
			   socket_id, nb_sop_desc + nb_data_desc,
			   sizeof(struct cq_enet_rq_desc));
	if (rc) {
		dev_err(enic, "error in allocation of cq for rq\n");
		goto err_free_rq_data;
	}

	/* Allocate the mbuf rings */
	rq_sop->mbuf_ring = (struct rte_mbuf **)
		rte_zmalloc_socket("rq->mbuf_ring",
				   sizeof(struct rte_mbuf *) * nb_sop_desc,
				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
	if (rq_sop->mbuf_ring == NULL)
		goto err_free_cq;

	if (rq_data->in_use) {
		rq_data->mbuf_ring = (struct rte_mbuf **)
			rte_zmalloc_socket("rq->mbuf_ring",
				sizeof(struct rte_mbuf *) * nb_data_desc,
				RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
		if (rq_data->mbuf_ring == NULL)
			goto err_free_sop_mbuf;
	}

	rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */

	return 0;

err_free_sop_mbuf:
	rte_free(rq_sop->mbuf_ring);
err_free_cq:
	/* cleanup on error */
	vnic_cq_free(&enic->cq[queue_idx]);
err_free_rq_data:
	if (rq_data->in_use)
		vnic_rq_free(rq_data);
err_free_rq_sop:
	vnic_rq_free(rq_sop);
err_exit:
	return -ENOMEM;
}

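/* Free a Tx queue: its completion-message memzone, the WQ itself, and its CQ. */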
void enic_free_wq(void *txq)
{
	struct vnic_wq *wq;
	struct enic *enic;

	if (txq == NULL)
		return;

	wq = (struct vnic_wq *)txq;
	enic = vnic_dev_priv(wq->vdev);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);
	vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
}

int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
	unsigned int socket_id, uint16_t nb_desc)
{
	int err;
	struct vnic_wq *wq = &enic->wq[queue_idx];
	unsigned int cq_index = enic_cq_wq(enic, queue_idx);
	char name[NAME_MAX];
	static int instance;

	wq->socket_id = socket_id;
	if (nb_desc > enic->config.wq_desc_count) {
		dev_warning(enic,
			"WQ %d - number of tx desc in cmd line (%d) "
			"is greater than that in the UCSM/CIMC adapter "
			"policy. Applying the value in the adapter "
			"policy (%d)\n",
			queue_idx, nb_desc, enic->config.wq_desc_count);
	} else if (nb_desc != enic->config.wq_desc_count) {
		enic->config.wq_desc_count = nb_desc;
		dev_info(enic,
			"TX Queues - effective number of descs:%d\n",
			nb_desc);
	}

	/* Allocate queue resources */
	err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
		enic->config.wq_desc_count,
		sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "error in allocation of wq\n");
		return err;
	}

	err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
		socket_id, enic->config.wq_desc_count,
		sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "error in allocation of cq for wq\n");
		return err;
	}

	/* setup up CQ message */
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
		 instance++);

	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	return err;
}

int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}
	enic_rxq_intr_deinit(enic);
	rte_intr_disable(&enic->pdev->intr_handle);
	rte_intr_callback_unregister(&enic->pdev->intr_handle,
				     enic_intr_handler,
				     (void *)enic->rte_dev);

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic_vnic_rq_count(enic); i++) {
		if (enic->rq[i].in_use) {
			err = vnic_rq_disable(&enic->rq[i]);
			if (err)
				return err;
		}
	}

	/* If we were using interrupts, set the interrupt vector to -1
	 * to disable interrupts. We are not disabling link notifications,
	 * though, as we want the polling of link status to continue working.
	 */
	if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
		vnic_dev_notify_set(enic->vdev, -1);

	vnic_dev_set_reset_flag(enic->vdev, 1);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic_vnic_rq_count(enic); i++)
		if (enic->rq[i].in_use)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

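/*
 * Kick off a devcmd via start() and poll finished() until the firmware
 * reports completion, for at most 2 seconds.
 */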
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	int done;
	int err;
	int i;

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max */
	for (i = 0; i < 2000; i++) {
		err = finished(vdev, &done);
		if (err)
			return err;
		if (done)
			return 0;
		usleep(1000);
	}
	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;
	int flags = CMD_OPENF_IG_DESCCACHE;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, flags);
	if (err)
		dev_err(enic_get_dev(enic),
			"vNIC device open failed, err %d\n", err);

	return err;
}

static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	int err, i;
	u8 name[NAME_MAX];

	RTE_ASSERT(user_key != NULL);
	snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
	rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
		&rss_key_buf_pa, name);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
		rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];

	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));

	/* Save for later queries */
	if (!err) {
		rte_memcpy(&enic->rss_key, rss_key_buf_va,
			   sizeof(union vnic_rss_key));
	}
	enic_free_consistent(enic, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	int err;
	u8 name[NAME_MAX];

	snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
	rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
		&rss_cpu_buf_pa, name);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));

	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));

	enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	/* Save for later queries */
	if (!err)
		rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
			   u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu,
			   u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	int err;

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		enic->ig_vlan_strip_en);

	return err;
}

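/*
 * Note: the default RSS key below is the ASCII string
 * "UCSawesomePALOuniqueLINUXrocksENICiscool" written out as decimal bytes.
 */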
/* Initialize RSS with defaults, called from dev_configure */
int enic_init_rss_nic_cfg(struct enic *enic)
{
	static uint8_t default_rss_key[] = {
		85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
		80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
		76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
		69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
	};
	struct rte_eth_rss_conf rss_conf;
	union vnic_rss_cpu rss_cpu;
	int ret, i;

	rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
	/*
	 * If setting key for the first time, and the user gives us none, then
	 * push the default key to NIC.
	 */
	if (rss_conf.rss_key == NULL) {
		rss_conf.rss_key = default_rss_key;
		rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	ret = enic_set_rss_conf(enic, &rss_conf);
	if (ret) {
		dev_err(enic, "Failed to configure RSS\n");
		return ret;
	}
	if (enic->rss_enable) {
		/* If enabling RSS, use the default reta */
		for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
		}
		ret = enic_set_rss_reta(enic, &rss_cpu);
		if (ret)
			dev_err(enic, "Failed to set RSS indirection table\n");
	}
	return ret;
}

int enic_setup_finish(struct enic *enic)
{
	enic_init_soft_stats(enic);

	/* Default conf */
	vnic_dev_packet_filter(enic->vdev,
		1 /* directed  */,
		1 /* multicast */,
		1 /* broadcast */,
		0 /* promisc   */,
		1 /* allmulti  */);

	enic->promisc = 0;
	enic->allmulti = 1;

	return 0;
}

static int enic_rss_conf_valid(struct enic *enic,
			       struct rte_eth_rss_conf *rss_conf)
{
	/* RSS is disabled per VIC settings. Ignore rss_conf. */
	if (enic->flow_type_rss_offloads == 0)
		return 0;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	if (rss_conf->rss_hf != 0 &&
	    (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
		dev_err(enic, "Given rss_hf contains none of the supported"
			" types\n");
		return -EINVAL;
	}
	return 0;
}

/* Set hash type and key according to rss_conf */
int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *eth_dev;
	uint64_t rss_hf;
	u8 rss_hash_type;
	u8 rss_enable;
	int ret;

	RTE_ASSERT(rss_conf != NULL);
	ret = enic_rss_conf_valid(enic, rss_conf);
	if (ret) {
		dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
		return ret;
	}

	eth_dev = enic->rte_dev;
	rss_hash_type = 0;
	rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
	if (enic->rq_count > 1 &&
	    (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
	    rss_hf != 0) {
		rss_enable = 1;
		if (rss_hf & ETH_RSS_IPV4)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
			/*
			 * 'TCP' is not a typo. HW does not have a separate
			 * enable bit for UDP RSS. The TCP bit enables both TCP
			 * and UDP RSS.
			 */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
		}
		if (rss_hf & ETH_RSS_IPV6)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
			/* Again, 'TCP' is not a typo. */
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
		}
		if (rss_hf & ETH_RSS_IPV6_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
		if (rss_hf & ETH_RSS_IPV6_TCP_EX)
			rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
	} else {
		rss_enable = 0;
		rss_hf = 0;
	}

	/* Set the hash key if provided */
	if (rss_enable && rss_conf->rss_key) {
		ret = enic_set_rsskey(enic, rss_conf->rss_key);
		if (ret) {
			dev_err(enic, "Failed to set RSS key\n");
			return ret;
		}
	}

	ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
			      ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			      rss_enable);
	if (!ret) {
		enic->rss_hf = rss_hf;
		enic->rss_hash_type = rss_hash_type;
		enic->rss_enable = rss_enable;
	}
	return ret;
}

int enic_set_vlan_strip(struct enic *enic)
{
	/*
	 * Unfortunately, VLAN strip on/off and RSS on/off are configured
	 * together. So, re-do niccfg, preserving the current RSS settings.
	 */
	return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
			       ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
			       enic->rss_enable);
}

void enic_add_packet_filter(struct enic *enic)
{
	/* Args -> directed, multicast, broadcast, promisc, allmulti */
	vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
		enic->promisc, enic->allmulti);
}

int enic_get_link_status(struct enic *enic)
{
	return vnic_dev_link_status(enic->vdev);
}

static void enic_dev_deinit(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	/* stop link status checking */
	vnic_dev_notify_unset(enic->vdev);

	rte_free(eth_dev->data->mac_addrs);
	rte_free(enic->cq);
	rte_free(enic->intr);
	rte_free(enic->rq);
	rte_free(enic->wq);
}

int enic_set_vnic_res(struct enic *enic)
{
	struct rte_eth_dev *eth_dev = enic->rte_dev;
	int rc = 0;
	unsigned int required_rq, required_wq, required_cq, required_intr;

	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
	required_rq = eth_dev->data->nb_rx_queues * 2;
	required_wq = eth_dev->data->nb_tx_queues;
	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
	required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		required_intr += eth_dev->data->nb_rx_queues;
	}

	if (enic->conf_rq_count < required_rq) {
		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
			eth_dev->data->nb_rx_queues,
			required_rq, enic->conf_rq_count);
		rc = -EINVAL;
	}
	if (enic->conf_wq_count < required_wq) {
		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
		rc = -EINVAL;
	}

	if (enic->conf_cq_count < required_cq) {
		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
			required_cq, enic->conf_cq_count);
		rc = -EINVAL;
	}
	if (enic->conf_intr_count < required_intr) {
		dev_err(dev, "Not enough Interrupts to support Rx queue"
			" interrupts. Required:%u, Configured:%u\n",
			required_intr, enic->conf_intr_count);
		rc = -EINVAL;
	}

	if (rc == 0) {
		enic->rq_count = eth_dev->data->nb_rx_queues;
		enic->wq_count = eth_dev->data->nb_tx_queues;
		enic->cq_count = enic->rq_count + enic->wq_count;
		enic->intr_count = required_intr;
	}

	return rc;
}

/* Initialize the completion queue for an RQ */
static int
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
	struct vnic_rq *sop_rq, *data_rq;
	unsigned int cq_idx;
	int rc = 0;

	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
	cq_idx = rq_idx;

	vnic_cq_clean(&enic->cq[cq_idx]);
	vnic_cq_init(&enic->cq[cq_idx],
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     0 /* cq_message_addr */);

	vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
			   enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
			   sop_rq->ring.desc_count - 1, 1, 0);
	if (data_rq->in_use) {
		vnic_rq_init_start(data_rq,
				   enic_cq_rq(enic,
				   enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
				   data_rq->ring.desc_count - 1, 1, 0);
	}

	rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
	if (rc)
		return rc;

	if (data_rq->in_use) {
		rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
		if (rc) {
			enic_rxmbuf_queue_release(enic, sop_rq);
			return rc;
		}
	}

	return 0;
}

/* The Cisco NIC can send and receive packets up to a max packet size
 * determined by the NIC type and firmware. There is also an MTU
 * configured into the NIC via the CIMC/UCSM management interface
 * which can be overridden by this function (up to the max packet size).
 * Depending on the network setup, doing so may cause packet drops
 * and unexpected behavior.
 */
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
	unsigned int rq_idx;
	struct vnic_rq *rq;
	int rc = 0;
	uint16_t old_mtu;	/* previous setting */
	uint16_t config_mtu;	/* Value configured into NIC via CIMC/UCSM */
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	old_mtu = eth_dev->data->mtu;
	config_mtu = enic->config.mtu;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	if (new_mtu > enic->max_mtu) {
		dev_err(enic,
			"MTU not updated: requested (%u) greater than max (%u)\n",
			new_mtu, enic->max_mtu);
		return -EINVAL;
	}
	if (new_mtu < ENIC_MIN_MTU) {
		dev_info(enic,
			"MTU not updated: requested (%u) less than min (%u)\n",
			new_mtu, ENIC_MIN_MTU);
		return -EINVAL;
	}
	if (new_mtu > config_mtu)
		dev_warning(enic,
			"MTU (%u) is greater than value configured in NIC (%u)\n",
			new_mtu, config_mtu);

	/* The easy case is when scatter is disabled. However if the MTU
	 * becomes greater than the mbuf data size, packet drops will ensue.
	 */
	if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
	      DEV_RX_OFFLOAD_SCATTER)) {
		eth_dev->data->mtu = new_mtu;
		goto set_mtu_done;
	}

	/* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
	 * change Rx scatter mode if necessary for better performance. I.e. if
	 * MTU was greater than the mbuf size and now it's less, scatter Rx
	 * doesn't have to be used and vice versa.
	 */
	rte_spinlock_lock(&enic->mtu_lock);

	/* Stop traffic on all RQs */
	for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
		rq = &enic->rq[rq_idx];
		if (rq->is_sop && rq->in_use) {
			rc = enic_stop_rq(enic,
					  enic_sop_rq_idx_to_rte_idx(rq_idx));
			if (rc) {
				dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
				goto set_mtu_done;
			}
		}
	}

	/* replace Rx function with a no-op to avoid getting stale pkts */
	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
	rte_mb();

	/* Allow time for threads to exit the real Rx function. */
	usleep(100000);

	/* now it is safe to reconfigure the RQs */

	/* update the mtu */
	eth_dev->data->mtu = new_mtu;

	/* free and reallocate RQs with the new MTU */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];

		enic_free_rq(rq);
		rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
				   rq->tot_nb_desc, rq->rx_free_thresh);
		if (rc) {
			dev_err(enic,
				"Fatal MTU alloc error- No traffic will pass\n");
			goto set_mtu_done;
		}

		rc = enic_reinit_rq(enic, rq_idx);
		if (rc) {
			dev_err(enic,
				"Fatal MTU RQ reinit- No traffic will pass\n");
			goto set_mtu_done;
		}
	}

	/* put back the real receive function */
	rte_mb();
	eth_dev->rx_pkt_burst = enic_recv_pkts;
	rte_mb();

	/* restart Rx traffic */
	for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
		rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
		if (rq->is_sop && rq->in_use)
			enic_start_rq(enic, rq_idx);
	}

set_mtu_done:
	dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
	rte_spinlock_unlock(&enic->mtu_lock);
	return rc;
}

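/*
 * One-time device initialization, called from enic_probe(): fetch the vNIC
 * configuration and resource counts, allocate the queue and interrupt
 * bookkeeping arrays, and detect overlay (VXLAN) offload support.
 */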
static int enic_dev_init(struct enic *enic)
{
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	vnic_dev_intr_coal_timer_info_default(enic->vdev);

	/* Get vNIC configuration */
	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts */
	enic_get_res_counts(enic);
	if (enic->conf_rq_count == 1) {
		dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
		dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
		dev_err(enic, "See the ENIC PMD guide for more information.\n");
		return -EINVAL;
	}
	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
			       enic->conf_cq_count, 8);
	enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
				 enic->conf_intr_count, 8);
	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
			       enic->conf_rq_count, 8);
	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
			       enic->conf_wq_count, 8);
	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
		return -1;
	}
	if (enic->conf_intr_count > 0 && enic->intr == NULL) {
		dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
		return -1;
	}
	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
		return -1;
	}
	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
		return -1;
	}

	/* Get the supported filters */
	enic_fdir_info(enic);

	eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
					       * ENIC_MAX_MAC_ADDR, 0);
	if (!eth_dev->data->mac_addrs) {
		dev_err(enic, "mac addr storage alloc failed, aborting.\n");
		return -1;
	}
	ether_addr_copy((struct ether_addr *)enic->mac_addr,
			eth_dev->data->mac_addrs);

	vnic_dev_set_reset_flag(enic->vdev, 0);

	LIST_INIT(&enic->flows);
	rte_spinlock_init(&enic->flows_lock);

	/* set up link status checking */
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	enic->overlay_offload = false;
	if (!enic->disable_overlay && enic->vxlan &&
	    /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
	    vnic_dev_overlay_offload_ctrl(enic->vdev,
					  OVERLAY_FEATURE_VXLAN,
					  OVERLAY_OFFLOAD_ENABLE) == 0) {
		enic->tx_offload_capa |=
			DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
			DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
		/*
		 * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
		 * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
		 */
		enic->tx_offload_mask |=
			PKT_TX_OUTER_IP_CKSUM |
			PKT_TX_TUNNEL_MASK;
		enic->overlay_offload = true;
		dev_info(enic, "Overlay offload is enabled\n");
	}

	return 0;
}

int enic_probe(struct enic *enic)
{
	struct rte_pci_device *pdev = enic->pdev;
	int err = -1;

	dev_debug(enic, "Initializing ENIC PMD\n");

	/* if this is a secondary process the hardware is already initialized */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
	enic->bar0.len = pdev->mem_resource[0].len;

	/* Register vNIC device */
	enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
	if (!enic->vdev) {
		dev_err(enic, "vNIC registration failed, aborting\n");
		goto err_out;
	}

	LIST_INIT(&enic->memzone_list);
	rte_spinlock_init(&enic->memzone_list_lock);

	vnic_register_cbacks(enic->vdev,
		enic_alloc_consistent,
		enic_free_consistent);

	/*
	 * Allocate the consistent memory for stats upfront so both primary and
	 * secondary processes can dump stats.
	 */
	err = vnic_dev_alloc_stats_mem(enic->vdev);
	if (err) {
		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
		goto err_out_unregister;
	}
	/* Issue device open to get device in known state */
	err = enic_dev_open(enic);
	if (err) {
		dev_err(enic, "vNIC dev open failed, aborting\n");
		goto err_out_unregister;
	}

	/* Set ingress vlan rewrite mode before vnic initialization */
	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
						IG_VLAN_REWRITE_MODE_PASS_THRU);
	if (err) {
		dev_err(enic,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		dev_err(enic, "vNIC dev init failed, aborting\n");
		goto err_out_dev_close;
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(enic, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	return 0;

err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_unregister:
	vnic_dev_unregister(enic->vdev);
err_out:
	return err;
}

void enic_remove(struct enic *enic)
{
	enic_dev_deinit(enic);
	vnic_dev_close(enic->vdev);
	vnic_dev_unregister(enic->vdev);
}