1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
14 #include <rte_bus_pci.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
18 #include <rte_string_fns.h>
19 #include <rte_ethdev_driver.h>
21 #include "enic_compat.h"
23 #include "wq_enet_desc.h"
24 #include "rq_enet_desc.h"
25 #include "cq_enet_desc.h"
26 #include "vnic_enet.h"
31 #include "vnic_intr.h"
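/* True if the vNIC is the SR-IOV VF variant of the VIC adapter. */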
34 static inline int enic_is_sriov_vf(struct enic *enic)
36 return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
39 static int is_zero_addr(uint8_t *addr)
41 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
44 static int is_mcast_addr(uint8_t *addr)
49 static int is_eth_addr_valid(uint8_t *addr)
51 return !is_mcast_addr(addr) && !is_zero_addr(addr);
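/* Free any mbufs still posted in the RQ's software ring. */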
55 enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
59 if (!rq || !rq->mbuf_ring) {
60 dev_debug(enic, "Pointer to rq or mbuf_ring is NULL\n");
64 for (i = 0; i < rq->ring.desc_count; i++) {
65 if (rq->mbuf_ring[i]) {
66 rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
67 rq->mbuf_ring[i] = NULL;
72 static void enic_free_wq_buf(struct vnic_wq_buf *buf)
74 struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
76 rte_pktmbuf_free_seg(mbuf);
80 static void enic_log_q_error(struct enic *enic)
85 for (i = 0; i < enic->wq_count; i++) {
86 error_status = vnic_wq_error_status(&enic->wq[i]);
88 dev_err(enic, "WQ[%d] error_status %d\n", i,
92 for (i = 0; i < enic_vnic_rq_count(enic); i++) {
93 if (!enic->rq[i].in_use)
95 error_status = vnic_rq_error_status(&enic->rq[i]);
97 dev_err(enic, "RQ[%d] error_status %d\n", i,
102 static void enic_clear_soft_stats(struct enic *enic)
104 struct enic_soft_stats *soft_stats = &enic->soft_stats;
105 rte_atomic64_clear(&soft_stats->rx_nombuf);
106 rte_atomic64_clear(&soft_stats->rx_packet_errors);
107 rte_atomic64_clear(&soft_stats->tx_oversized);
110 static void enic_init_soft_stats(struct enic *enic)
112 struct enic_soft_stats *soft_stats = &enic->soft_stats;
113 rte_atomic64_init(&soft_stats->rx_nombuf);
114 rte_atomic64_init(&soft_stats->rx_packet_errors);
115 rte_atomic64_init(&soft_stats->tx_oversized);
116 enic_clear_soft_stats(enic);
119 void enic_dev_stats_clear(struct enic *enic)
121 if (vnic_dev_stats_clear(enic->vdev))
122 dev_err(enic, "Error in clearing stats\n");
123 enic_clear_soft_stats(enic);
126 int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
128 struct vnic_stats *stats;
129 struct enic_soft_stats *soft_stats = &enic->soft_stats;
130 int64_t rx_truncated;
131 uint64_t rx_packet_errors;
132 int ret = vnic_dev_stats_dump(enic->vdev, &stats);
135 dev_err(enic, "Error in getting stats\n");
139 /* The number of truncated packets can only be calculated by
140 * subtracting a hardware counter from error packets received by
141 * the driver. Note: this causes transient inaccuracies in the
142 * ipackets count. Also, the lengths of truncated packets are
143 * counted in ibytes even though truncated packets are dropped,
144 * which can make ibytes slightly higher than it should be.
146 rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors);
147 rx_truncated = rx_packet_errors - stats->rx.rx_errors;
149 r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated;
150 r_stats->opackets = stats->tx.tx_frames_ok;
152 r_stats->ibytes = stats->rx.rx_bytes_ok;
153 r_stats->obytes = stats->tx.tx_bytes_ok;
155 r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
156 r_stats->oerrors = stats->tx.tx_errors
157 + rte_atomic64_read(&soft_stats->tx_oversized);
159 r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
161 r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
165 void enic_del_mac_address(struct enic *enic, int mac_index)
167 struct rte_eth_dev *eth_dev = enic->rte_dev;
168 uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
170 if (vnic_dev_del_addr(enic->vdev, mac_addr))
171 dev_err(enic, "del mac addr failed\n");
174 int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
178 if (!is_eth_addr_valid(mac_addr)) {
179 dev_err(enic, "invalid mac address\n");
183 err = vnic_dev_add_addr(enic->vdev, mac_addr);
185 dev_err(enic, "add mac addr failed\n");
190 enic_free_rq_buf(struct rte_mbuf **mbuf)
195 rte_pktmbuf_free(*mbuf);
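/* Program RQ, CQ, WQ, and interrupt resources into the vNIC. Each Rx
 * queue is a sop/data RQ pair sharing one CQ; each WQ gets its own CQ
 * that reports completions via a DMA'd CQ message.
 */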
199 void enic_init_vnic_resources(struct enic *enic)
201 unsigned int error_interrupt_enable = 1;
202 unsigned int error_interrupt_offset = 0;
203 unsigned int rxq_interrupt_enable = 0;
204 unsigned int rxq_interrupt_offset;
205 unsigned int index = 0;
207 struct vnic_rq *data_rq;
209 if (enic->rte_dev->data->dev_conf.intr_conf.rxq) {
210 rxq_interrupt_enable = 1;
211 rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
213 for (index = 0; index < enic->rq_count; index++) {
214 cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
216 vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
218 error_interrupt_enable,
219 error_interrupt_offset);
221 data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
223 vnic_rq_init(data_rq,
225 error_interrupt_enable,
226 error_interrupt_offset);
228 vnic_cq_init(&enic->cq[cq_idx],
229 0 /* flow_control_enable */,
230 1 /* color_enable */,
233 1 /* cq_tail_color */,
234 rxq_interrupt_enable,
235 1 /* cq_entry_enable */,
236 0 /* cq_message_enable */,
237 rxq_interrupt_offset,
238 0 /* cq_message_addr */);
239 if (rxq_interrupt_enable)
240 rxq_interrupt_offset++;
243 for (index = 0; index < enic->wq_count; index++) {
244 vnic_wq_init(&enic->wq[index],
245 enic_cq_wq(enic, index),
246 error_interrupt_enable,
247 error_interrupt_offset);
249 cq_idx = enic_cq_wq(enic, index);
250 vnic_cq_init(&enic->cq[cq_idx],
251 0 /* flow_control_enable */,
252 1 /* color_enable */,
255 1 /* cq_tail_color */,
256 0 /* interrupt_enable */,
257 0 /* cq_entry_enable */,
258 1 /* cq_message_enable */,
259 0 /* interrupt offset */,
260 (u64)enic->wq[index].cqmsg_rz->iova);
263 for (index = 0; index < enic->intr_count; index++) {
264 vnic_intr_init(&enic->intr[index],
265 enic->config.intr_timer_usec,
266 enic->config.intr_timer_type,
267 /*mask_on_assertion*/1);
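/* Fill the RQ ring with freshly allocated mbufs and post them to the VIC. */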
273 enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
276 struct rq_enet_desc *rqd = rq->ring.descs;
279 uint32_t max_rx_pkt_len;
285 dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
286 rq->ring.desc_count);
289 * If *not* using scatter and the mbuf size is smaller than the
290 * requested max packet size (max_rx_pkt_len), then reduce the
291 * posted buffer size to max_rx_pkt_len. HW still receives packets
292 * larger than max_rx_pkt_len, but they will be truncated, and we
293 * drop them in the rx handler. Not ideal, but better than returning
294 * large packets when the user is not expecting them.
296 max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
297 rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
298 if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
299 rq_buf_len = max_rx_pkt_len;
300 for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
301 mb = rte_mbuf_raw_alloc(rq->mp);
303 dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
304 (unsigned)rq->index);
308 mb->data_off = RTE_PKTMBUF_HEADROOM;
309 dma_addr = (dma_addr_t)(mb->buf_iova
310 + RTE_PKTMBUF_HEADROOM);
311 rq_enet_desc_enc(rqd, dma_addr,
312 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
313 : RQ_ENET_TYPE_NOT_SOP),
315 rq->mbuf_ring[i] = mb;
318 /* make sure all prior writes are complete before doing the PIO write */
321 /* Post all but the last buffer to VIC. */
322 rq->posted_index = rq->ring.desc_count - 1;
326 dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
327 enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
328 iowrite32(rq->posted_index, &rq->ctrl->posted_index);
329 iowrite32(0, &rq->ctrl->fetch_index);
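/* DMA-memory allocator registered with the vnic_dev layer: carves the
 * buffer out of a memzone and tracks it on memzone_list so that
 * enic_free_consistent() can find and release it later.
 */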
337 enic_alloc_consistent(void *priv, size_t size,
338 dma_addr_t *dma_handle, u8 *name)
341 const struct rte_memzone *rz;
343 struct enic *enic = (struct enic *)priv;
344 struct enic_memzone_entry *mze;
346 rz = rte_memzone_reserve_aligned((const char *)name,
347 size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
349 pr_err("%s : Failed to allocate memory requested for %s\n",
355 *dma_handle = (dma_addr_t)rz->iova;
357 mze = rte_malloc("enic memzone entry",
358 sizeof(struct enic_memzone_entry), 0);
361 pr_err("%s : Failed to allocate memory for memzone list\n",
363 rte_memzone_free(rz);
369 rte_spinlock_lock(&enic->memzone_list_lock);
370 LIST_INSERT_HEAD(&enic->memzone_list, mze, entries);
371 rte_spinlock_unlock(&enic->memzone_list_lock);
377 enic_free_consistent(void *priv,
378 __rte_unused size_t size,
380 dma_addr_t dma_handle)
382 struct enic_memzone_entry *mze;
383 struct enic *enic = (struct enic *)priv;
385 rte_spinlock_lock(&enic->memzone_list_lock);
386 LIST_FOREACH(mze, &enic->memzone_list, entries) {
387 if (mze->rz->addr == vaddr &&
388 mze->rz->iova == dma_handle)
392 rte_spinlock_unlock(&enic->memzone_list_lock);
394 "Tried to free memory, but couldn't find it in the memzone list\n");
397 LIST_REMOVE(mze, entries);
398 rte_spinlock_unlock(&enic->memzone_list_lock);
399 rte_memzone_free(mze->rz);
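/* Refresh the link state from the vNIC and mirror it into rte_eth_dev. */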
403 int enic_link_update(struct enic *enic)
405 struct rte_eth_dev *eth_dev = enic->rte_dev;
409 link_status = enic_get_link_status(enic);
410 ret = (link_status == enic->link_status);
411 enic->link_status = link_status;
412 eth_dev->data->dev_link.link_status = link_status;
413 eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
414 eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
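/* Handler for the LSC/error interrupt: update link state, notify the
 * application, and log any queue errors.
 */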
419 enic_intr_handler(void *arg)
421 struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
422 struct enic *enic = pmd_priv(dev);
424 vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
426 enic_link_update(enic);
427 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
428 enic_log_q_error(enic);
431 static int enic_rxq_intr_init(struct enic *enic)
433 struct rte_intr_handle *intr_handle;
434 uint32_t rxq_intr_count, i;
437 intr_handle = enic->rte_dev->intr_handle;
438 if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
441 * Rx queue interrupts only work when we have MSI-X interrupts,
442 * one per queue. Sharing one interrupt is technically
443 * possible with VIC, but it is not worth the complications it brings.
445 if (!rte_intr_cap_multiple(intr_handle)) {
446 dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
447 " (vfio-pci driver)\n");
450 rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
451 err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
453 dev_err(enic, "Failed to enable event fds for Rx queue"
457 intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
458 rxq_intr_count * sizeof(int), 0);
459 if (intr_handle->intr_vec == NULL) {
460 dev_err(enic, "Failed to allocate intr_vec\n");
463 for (i = 0; i < rxq_intr_count; i++)
464 intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
468 static void enic_rxq_intr_deinit(struct enic *enic)
470 struct rte_intr_handle *intr_handle;
472 intr_handle = enic->rte_dev->intr_handle;
473 rte_intr_efd_disable(intr_handle);
474 if (intr_handle->intr_vec != NULL) {
475 rte_free(intr_handle->intr_vec);
476 intr_handle->intr_vec = NULL;
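/* Bring the port up: post Rx buffers, start all WQs and RQs, and
 * enable the LSC and per-Rx-queue interrupts that were configured.
 */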
480 int enic_enable(struct enic *enic)
484 struct rte_eth_dev *eth_dev = enic->rte_dev;
486 eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
487 eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
489 /* vnic notification of link status has already been turned on in
490 * enic_dev_init(), which is called at probe time. Here we are
491 * just turning on interrupt vector 0 if needed.
493 if (eth_dev->data->dev_conf.intr_conf.lsc)
494 vnic_dev_notify_set(enic->vdev, 0);
496 err = enic_rxq_intr_init(enic);
499 if (enic_clsf_init(enic))
500 dev_warning(enic, "Init of hash table for clsf failed. "\
501 "Flow director feature will not work\n");
503 for (index = 0; index < enic->rq_count; index++) {
504 err = enic_alloc_rx_queue_mbufs(enic,
505 &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
507 dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
510 err = enic_alloc_rx_queue_mbufs(enic,
511 &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
513 /* release the allocated mbufs for the sop rq */
514 enic_rxmbuf_queue_release(enic,
515 &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
517 dev_err(enic, "Failed to alloc data RX queue mbufs\n");
522 for (index = 0; index < enic->wq_count; index++)
523 enic_start_wq(enic, index);
524 for (index = 0; index < enic->rq_count; index++)
525 enic_start_rq(enic, index);
527 vnic_dev_add_addr(enic->vdev, enic->mac_addr);
529 vnic_dev_enable_wait(enic->vdev);
531 /* Register and enable error interrupt */
532 rte_intr_callback_register(&(enic->pdev->intr_handle),
533 enic_intr_handler, (void *)enic->rte_dev);
535 rte_intr_enable(&(enic->pdev->intr_handle));
536 /* Unmask LSC interrupt */
537 vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
542 int enic_alloc_intr_resources(struct enic *enic)
547 dev_info(enic, "vNIC resources used: "\
548 "wq %d rq %d cq %d intr %d\n",
549 enic->wq_count, enic_vnic_rq_count(enic),
550 enic->cq_count, enic->intr_count);
552 for (i = 0; i < enic->intr_count; i++) {
553 err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
555 enic_free_vnic_resources(enic);
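/* Tear down an Rx queue: free posted mbufs, the mbuf rings, the
 * sop/data RQ pair, and the CQ that serves them.
 */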
562 void enic_free_rq(void *rxq)
564 struct vnic_rq *rq_sop, *rq_data;
570 rq_sop = (struct vnic_rq *)rxq;
571 enic = vnic_dev_priv(rq_sop->vdev);
572 rq_data = &enic->rq[rq_sop->data_queue_idx];
574 enic_rxmbuf_queue_release(enic, rq_sop);
576 enic_rxmbuf_queue_release(enic, rq_data);
578 rte_free(rq_sop->mbuf_ring);
580 rte_free(rq_data->mbuf_ring);
582 rq_sop->mbuf_ring = NULL;
583 rq_data->mbuf_ring = NULL;
585 vnic_rq_free(rq_sop);
587 vnic_rq_free(rq_data);
589 vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
595 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
597 struct rte_eth_dev *eth_dev = enic->rte_dev;
598 vnic_wq_enable(&enic->wq[queue_idx]);
599 eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
602 int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
604 struct rte_eth_dev *eth_dev = enic->rte_dev;
607 ret = vnic_wq_disable(&enic->wq[queue_idx]);
611 eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
615 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
617 struct vnic_rq *rq_sop;
618 struct vnic_rq *rq_data;
619 rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
620 rq_data = &enic->rq[rq_sop->data_queue_idx];
621 struct rte_eth_dev *eth_dev = enic->rte_dev;
624 vnic_rq_enable(rq_data);
626 vnic_rq_enable(rq_sop);
627 eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
630 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
632 int ret1 = 0, ret2 = 0;
633 struct rte_eth_dev *eth_dev = enic->rte_dev;
634 struct vnic_rq *rq_sop;
635 struct vnic_rq *rq_data;
636 rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
637 rq_data = &enic->rq[rq_sop->data_queue_idx];
639 ret2 = vnic_rq_disable(rq_sop);
642 ret1 = vnic_rq_disable(rq_data);
649 eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
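/* Allocate one eth_dev Rx queue as a sop/data RQ pair plus a CQ. The
 * data RQ holds non-first packet segments and is enabled only when Rx
 * scatter requires more than one mbuf per packet.
 */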
653 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
654 unsigned int socket_id, struct rte_mempool *mp,
655 uint16_t nb_desc, uint16_t free_thresh)
658 uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
659 uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
660 struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
661 struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
662 unsigned int mbuf_size, mbufs_per_pkt;
663 unsigned int nb_sop_desc, nb_data_desc;
664 uint16_t min_sop, max_sop, min_data, max_data;
665 uint32_t max_rx_pkt_len;
668 rq_sop->data_queue_idx = data_queue_idx;
670 rq_data->data_queue_idx = 0;
671 rq_sop->socket_id = socket_id;
673 rq_data->socket_id = socket_id;
676 rq_sop->rx_free_thresh = free_thresh;
677 rq_data->rx_free_thresh = free_thresh;
678 dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
681 mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
682 RTE_PKTMBUF_HEADROOM);
683 /* max_rx_pkt_len includes the ethernet header and CRC. */
684 max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
686 if (enic->rte_dev->data->dev_conf.rxmode.offloads &
687 DEV_RX_OFFLOAD_SCATTER) {
688 dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
689 /* ceil((max pkt len)/mbuf_size) */
690 mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
692 dev_info(enic, "Scatter rx mode disabled\n");
694 if (max_rx_pkt_len > mbuf_size) {
695 dev_warning(enic, "The maximum Rx packet size (%u) is"
696 " larger than the mbuf size (%u), and"
697 " scatter is disabled. Larger packets will"
699 max_rx_pkt_len, mbuf_size);
703 if (mbufs_per_pkt > 1) {
704 dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
705 rq_sop->data_queue_enable = 1;
708 * HW does not directly support rxmode.max_rx_pkt_len. HW always
709 * receives packet sizes up to the "max" MTU.
710 * If not using scatter, we can achieve the effect of dropping
711 * larger packets by reducing the size of posted buffers.
712 * See enic_alloc_rx_queue_mbufs().
715 enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu)) {
716 dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
717 " when scatter rx mode is in use.\n");
720 dev_info(enic, "Rq %u Scatter rx mode not being used\n",
722 rq_sop->data_queue_enable = 0;
726 /* the number of descriptors has to be a multiple of 32 */
727 nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
728 nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
730 rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
731 rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
733 if (mbufs_per_pkt > 1) {
735 max_sop = ((enic->config.rq_desc_count /
736 (mbufs_per_pkt - 1)) & ~0x1F);
737 min_data = min_sop * (mbufs_per_pkt - 1);
738 max_data = enic->config.rq_desc_count;
741 max_sop = enic->config.rq_desc_count;
746 if (nb_desc < (min_sop + min_data)) {
748 "Number of rx descs too low, adjusting to minimum\n");
749 nb_sop_desc = min_sop;
750 nb_data_desc = min_data;
751 } else if (nb_desc > (max_sop + max_data)) {
753 "Number of rx_descs too high, adjusting to maximum\n");
754 nb_sop_desc = max_sop;
755 nb_data_desc = max_data;
757 if (mbufs_per_pkt > 1) {
758 dev_info(enic, "For max packet size %u and mbuf size %u valid"
759 " rx descriptor range is %u to %u\n",
760 max_rx_pkt_len, mbuf_size, min_sop + min_data,
763 dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
764 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
766 /* Allocate sop queue resources */
767 rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
768 nb_sop_desc, sizeof(struct rq_enet_desc));
770 dev_err(enic, "error in allocation of sop rq\n");
773 nb_sop_desc = rq_sop->ring.desc_count;
775 if (rq_data->in_use) {
776 /* Allocate data queue resources */
777 rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
779 sizeof(struct rq_enet_desc));
781 dev_err(enic, "error in allocation of data rq\n");
782 goto err_free_rq_sop;
784 nb_data_desc = rq_data->ring.desc_count;
786 rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
787 socket_id, nb_sop_desc + nb_data_desc,
788 sizeof(struct cq_enet_rq_desc));
790 dev_err(enic, "error in allocation of cq for rq\n");
791 goto err_free_rq_data;
794 /* Allocate the mbuf rings */
795 rq_sop->mbuf_ring = (struct rte_mbuf **)
796 rte_zmalloc_socket("rq->mbuf_ring",
797 sizeof(struct rte_mbuf *) * nb_sop_desc,
798 RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
799 if (rq_sop->mbuf_ring == NULL)
802 if (rq_data->in_use) {
803 rq_data->mbuf_ring = (struct rte_mbuf **)
804 rte_zmalloc_socket("rq->mbuf_ring",
805 sizeof(struct rte_mbuf *) * nb_data_desc,
806 RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
807 if (rq_data->mbuf_ring == NULL)
808 goto err_free_sop_mbuf;
811 rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
816 rte_free(rq_sop->mbuf_ring);
818 /* cleanup on error */
819 vnic_cq_free(&enic->cq[queue_idx]);
822 vnic_rq_free(rq_data);
824 vnic_rq_free(rq_sop);
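/* Tear down a Tx queue: the CQ-message memzone, the WQ, and its CQ. */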
829 void enic_free_wq(void *txq)
837 wq = (struct vnic_wq *)txq;
838 enic = vnic_dev_priv(wq->vdev);
839 rte_memzone_free(wq->cqmsg_rz);
841 vnic_cq_free(&enic->cq[enic->rq_count + wq->index]);
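/* Allocate one eth_dev Tx queue (WQ) and its CQ, plus the memzone the
 * VIC writes CQ messages into.
 */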
844 int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
845 unsigned int socket_id, uint16_t nb_desc)
848 struct vnic_wq *wq = &enic->wq[queue_idx];
849 unsigned int cq_index = enic_cq_wq(enic, queue_idx);
853 wq->socket_id = socket_id;
855 if (nb_desc > enic->config.wq_desc_count) {
857 "WQ %d - number of tx desc in cmd line (%d)"\
858 "is greater than that in the UCSM/CIMC adapter"\
859 "policy. Applying the value in the adapter "\
861 queue_idx, nb_desc, enic->config.wq_desc_count);
862 } else if (nb_desc != enic->config.wq_desc_count) {
863 enic->config.wq_desc_count = nb_desc;
865 "TX Queues - effective number of descs:%d\n",
870 /* Allocate queue resources */
871 err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
872 enic->config.wq_desc_count,
873 sizeof(struct wq_enet_desc));
875 dev_err(enic, "error in allocation of wq\n");
879 err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
880 socket_id, enic->config.wq_desc_count,
881 sizeof(struct cq_enet_wq_desc));
884 dev_err(enic, "error in allocation of cq for wq\n");
887 /* set up CQ message */
888 snprintf((char *)name, sizeof(name),
889 "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx,
892 wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
902 int enic_disable(struct enic *enic)
907 for (i = 0; i < enic->intr_count; i++) {
908 vnic_intr_mask(&enic->intr[i]);
909 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
911 enic_rxq_intr_deinit(enic);
912 rte_intr_disable(&enic->pdev->intr_handle);
913 rte_intr_callback_unregister(&enic->pdev->intr_handle,
915 (void *)enic->rte_dev);
917 vnic_dev_disable(enic->vdev);
919 enic_clsf_destroy(enic);
921 if (!enic_is_sriov_vf(enic))
922 vnic_dev_del_addr(enic->vdev, enic->mac_addr);
924 for (i = 0; i < enic->wq_count; i++) {
925 err = vnic_wq_disable(&enic->wq[i]);
929 for (i = 0; i < enic_vnic_rq_count(enic); i++) {
930 if (enic->rq[i].in_use) {
931 err = vnic_rq_disable(&enic->rq[i]);
937 /* If we were using interrupts, set the interrupt vector to -1
938 * to disable interrupts. We are not disabling link notifications,
939 * though, as we want the polling of link status to continue working.
941 if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
942 vnic_dev_notify_set(enic->vdev, -1);
944 vnic_dev_set_reset_flag(enic->vdev, 1);
946 for (i = 0; i < enic->wq_count; i++)
947 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
949 for (i = 0; i < enic_vnic_rq_count(enic); i++)
950 if (enic->rq[i].in_use)
951 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
952 for (i = 0; i < enic->cq_count; i++)
953 vnic_cq_clean(&enic->cq[i]);
954 for (i = 0; i < enic->intr_count; i++)
955 vnic_intr_clean(&enic->intr[i]);
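/* Issue a device command via start() and poll finished() for up to
 * roughly 2 seconds.
 */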
960 static int enic_dev_wait(struct vnic_dev *vdev,
961 int (*start)(struct vnic_dev *, int),
962 int (*finished)(struct vnic_dev *, int *),
969 err = start(vdev, arg);
973 /* Wait for func to complete... 2 seconds max */
974 for (i = 0; i < 2000; i++) {
975 err = finished(vdev, &done);
985 static int enic_dev_open(struct enic *enic)
989 err = enic_dev_wait(enic->vdev, vnic_dev_open,
990 vnic_dev_open_done, 0);
992 dev_err(enic_get_dev(enic),
993 "vNIC device open failed, err %d\n", err);
998 static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
1000 dma_addr_t rss_key_buf_pa;
1001 union vnic_rss_key *rss_key_buf_va = NULL;
1005 RTE_ASSERT(user_key != NULL);
1006 snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
1007 rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
1008 &rss_key_buf_pa, name);
1009 if (!rss_key_buf_va)
1012 for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
1013 rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
1015 err = enic_set_rss_key(enic,
1017 sizeof(union vnic_rss_key));
1019 /* Save for later queries */
1021 rte_memcpy(&enic->rss_key, rss_key_buf_va,
1022 sizeof(union vnic_rss_key));
1024 enic_free_consistent(enic, sizeof(union vnic_rss_key),
1025 rss_key_buf_va, rss_key_buf_pa);
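/* Program the RSS indirection table through a DMA-able buffer. */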
1030 int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
1032 dma_addr_t rss_cpu_buf_pa;
1033 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
1037 snprintf((char *)name, NAME_MAX, "rss_cpu-%s", enic->bdf_name);
1038 rss_cpu_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_cpu),
1039 &rss_cpu_buf_pa, name);
1040 if (!rss_cpu_buf_va)
1043 rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
1045 err = enic_set_rss_cpu(enic,
1047 sizeof(union vnic_rss_cpu));
1049 enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
1050 rss_cpu_buf_va, rss_cpu_buf_pa);
1052 /* Save for later queries */
1054 rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
1058 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
1059 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1061 const u8 tso_ipid_split_en = 0;
1064 err = enic_set_nic_cfg(enic,
1065 rss_default_cpu, rss_hash_type,
1066 rss_hash_bits, rss_base_cpu,
1067 rss_enable, tso_ipid_split_en,
1068 enic->ig_vlan_strip_en);
1073 /* Initialize RSS with defaults, called from dev_configure */
1074 int enic_init_rss_nic_cfg(struct enic *enic)
1076 static uint8_t default_rss_key[] = {
1077 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
1078 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
1079 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
1080 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
1082 struct rte_eth_rss_conf rss_conf;
1083 union vnic_rss_cpu rss_cpu;
1086 rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
1088 * If setting key for the first time, and the user gives us none, then
1089 * push the default key to the NIC.
1091 if (rss_conf.rss_key == NULL) {
1092 rss_conf.rss_key = default_rss_key;
1093 rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
1095 ret = enic_set_rss_conf(enic, &rss_conf);
1097 dev_err(enic, "Failed to configure RSS\n");
1100 if (enic->rss_enable) {
1101 /* If enabling RSS, use the default reta */
1102 for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
1103 rss_cpu.cpu[i / 4].b[i % 4] =
1104 enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
1106 ret = enic_set_rss_reta(enic, &rss_cpu);
1108 dev_err(enic, "Failed to set RSS indirection table\n");
1113 int enic_setup_finish(struct enic *enic)
1115 enic_init_soft_stats(enic);
1118 vnic_dev_packet_filter(enic->vdev,
1131 static int enic_rss_conf_valid(struct enic *enic,
1132 struct rte_eth_rss_conf *rss_conf)
1134 /* RSS is disabled per VIC settings. Ignore rss_conf. */
1135 if (enic->flow_type_rss_offloads == 0)
1137 if (rss_conf->rss_key != NULL &&
1138 rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
1139 dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
1140 rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
1143 if (rss_conf->rss_hf != 0 &&
1144 (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
1145 dev_err(enic, "Given rss_hf contains none of the supported"
1152 /* Set hash type and key according to rss_conf */
1153 int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
1155 struct rte_eth_dev *eth_dev;
1161 RTE_ASSERT(rss_conf != NULL);
1162 ret = enic_rss_conf_valid(enic, rss_conf);
1164 dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
1168 eth_dev = enic->rte_dev;
1170 rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
1171 if (enic->rq_count > 1 &&
1172 (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
1175 if (rss_hf & ETH_RSS_IPV4)
1176 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
1177 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1178 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
1179 if (rss_hf & ETH_RSS_IPV6)
1180 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
1181 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1182 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
1183 if (rss_hf & ETH_RSS_IPV6_EX)
1184 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6_EX;
1185 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1186 rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX;
1192 /* Set the hash key if provided */
1193 if (rss_enable && rss_conf->rss_key) {
1194 ret = enic_set_rsskey(enic, rss_conf->rss_key);
1196 dev_err(enic, "Failed to set RSS key\n");
1201 ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
1202 ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
1205 enic->rss_hf = rss_hf;
1206 enic->rss_hash_type = rss_hash_type;
1207 enic->rss_enable = rss_enable;
1212 int enic_set_vlan_strip(struct enic *enic)
1215 * Unfortunately, VLAN strip on/off and RSS on/off are configured
1216 * together. So, re-do niccfg, preserving the current RSS settings.
1218 return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
1219 ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
1223 void enic_add_packet_filter(struct enic *enic)
1225 /* Args -> directed, multicast, broadcast, promisc, allmulti */
1226 vnic_dev_packet_filter(enic->vdev, 1, 1, 1,
1227 enic->promisc, enic->allmulti);
1230 int enic_get_link_status(struct enic *enic)
1232 return vnic_dev_link_status(enic->vdev);
1235 static void enic_dev_deinit(struct enic *enic)
1237 struct rte_eth_dev *eth_dev = enic->rte_dev;
1239 /* stop link status checking */
1240 vnic_dev_notify_unset(enic->vdev);
1242 rte_free(eth_dev->data->mac_addrs);
1244 rte_free(enic->intr);
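/* Validate the queue counts requested by the application against the
 * resources configured in the vNIC, then commit them.
 */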
1250 int enic_set_vnic_res(struct enic *enic)
1252 struct rte_eth_dev *eth_dev = enic->rte_dev;
1254 unsigned int required_rq, required_wq, required_cq, required_intr;
1256 /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
1257 required_rq = eth_dev->data->nb_rx_queues * 2;
1258 required_wq = eth_dev->data->nb_tx_queues;
1259 required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
1260 required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
1261 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1262 required_intr += eth_dev->data->nb_rx_queues;
1265 if (enic->conf_rq_count < required_rq) {
1266 dev_err(dev, "Not enough Receive queues. Requested:%u which uses %u RQs on VIC, Configured:%u\n",
1267 eth_dev->data->nb_rx_queues,
1268 required_rq, enic->conf_rq_count);
1271 if (enic->conf_wq_count < required_wq) {
1272 dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
1273 eth_dev->data->nb_tx_queues, enic->conf_wq_count);
1277 if (enic->conf_cq_count < required_cq) {
1278 dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
1279 required_cq, enic->conf_cq_count);
1282 if (enic->conf_intr_count < required_intr) {
1283 dev_err(dev, "Not enough Interrupts to support Rx queue"
1284 " interrupts. Required:%u, Configured:%u\n",
1285 required_intr, enic->conf_intr_count);
1290 enic->rq_count = eth_dev->data->nb_rx_queues;
1291 enic->wq_count = eth_dev->data->nb_tx_queues;
1292 enic->cq_count = enic->rq_count + enic->wq_count;
1293 enic->intr_count = required_intr;
1299 /* Reinitialize an RQ pair and its completion queue, then repost the Rx buffers */
1301 enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
1303 struct vnic_rq *sop_rq, *data_rq;
1304 unsigned int cq_idx;
1307 sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1308 data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
1311 vnic_cq_clean(&enic->cq[cq_idx]);
1312 vnic_cq_init(&enic->cq[cq_idx],
1313 0 /* flow_control_enable */,
1314 1 /* color_enable */,
1317 1 /* cq_tail_color */,
1318 0 /* interrupt_enable */,
1319 1 /* cq_entry_enable */,
1320 0 /* cq_message_enable */,
1321 0 /* interrupt offset */,
1322 0 /* cq_message_addr */);
1325 vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
1326 enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
1327 sop_rq->ring.desc_count - 1, 1, 0);
1328 if (data_rq->in_use) {
1329 vnic_rq_init_start(data_rq,
1331 enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
1332 data_rq->ring.desc_count - 1, 1, 0);
1335 rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
1339 if (data_rq->in_use) {
1340 rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
1342 enic_rxmbuf_queue_release(enic, sop_rq);
1350 /* The Cisco NIC can send and receive packets up to a max packet size
1351 * determined by the NIC type and firmware. There is also an MTU
1352 * configured into the NIC via the CIMC/UCSM management interface
1353 * which can be overridden by this function (up to the max packet size).
1354 * Depending on the network setup, doing so may cause packet drops
1355 * and unexpected behavior.
1357 int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
1359 unsigned int rq_idx;
1362 uint16_t old_mtu; /* previous setting */
1363 uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
1364 struct rte_eth_dev *eth_dev = enic->rte_dev;
1366 old_mtu = eth_dev->data->mtu;
1367 config_mtu = enic->config.mtu;
1369 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1370 return -E_RTE_SECONDARY;
1372 if (new_mtu > enic->max_mtu) {
1374 "MTU not updated: requested (%u) greater than max (%u)\n",
1375 new_mtu, enic->max_mtu);
1378 if (new_mtu < ENIC_MIN_MTU) {
1380 "MTU not updated: requested (%u) less than min (%u)\n",
1381 new_mtu, ENIC_MIN_MTU);
1384 if (new_mtu > config_mtu)
1386 "MTU (%u) is greater than value configured in NIC (%u)\n",
1387 new_mtu, config_mtu);
1389 /* The easy case is when scatter is disabled. However, if the MTU
1390 * becomes greater than the mbuf data size, packet drops will ensue.
1392 if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
1393 DEV_RX_OFFLOAD_SCATTER)) {
1394 eth_dev->data->mtu = new_mtu;
1398 /* Rx scatter is enabled, so reconfigure the RQs on the fly. The point is
1399 * to change Rx scatter mode if necessary for better performance. That is,
1400 * if the MTU was greater than the mbuf size and now it's less, scatter Rx
1401 * doesn't have to be used and vice versa.
1403 rte_spinlock_lock(&enic->mtu_lock);
1405 /* Stop traffic on all RQs */
1406 for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
1407 rq = &enic->rq[rq_idx];
1408 if (rq->is_sop && rq->in_use) {
1409 rc = enic_stop_rq(enic,
1410 enic_sop_rq_idx_to_rte_idx(rq_idx));
1412 dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
1418 /* replace Rx function with a no-op to avoid getting stale pkts */
1419 eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
1422 /* Allow time for threads to exit the real Rx function. */
1425 /* now it is safe to reconfigure the RQs */
1427 /* update the mtu */
1428 eth_dev->data->mtu = new_mtu;
1430 /* free and reallocate RQs with the new MTU */
1431 for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1432 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1435 rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
1436 rq->tot_nb_desc, rq->rx_free_thresh);
1439 "Fatal MTU alloc error- No traffic will pass\n");
1443 rc = enic_reinit_rq(enic, rq_idx);
1446 "Fatal MTU RQ reinit- No traffic will pass\n");
1451 /* put back the real receive function */
1453 eth_dev->rx_pkt_burst = enic_recv_pkts;
1456 /* restart Rx traffic */
1457 for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
1458 rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
1459 if (rq->is_sop && rq->in_use)
1460 enic_start_rq(enic, rq_idx);
1464 dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
1465 rte_spinlock_unlock(&enic->mtu_lock);
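/* One-time device initialization: read the vNIC config, allocate the
 * per-resource arrays, set up the MAC address array, and arm link
 * status notification.
 */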
1469 static int enic_dev_init(struct enic *enic)
1472 struct rte_eth_dev *eth_dev = enic->rte_dev;
1474 vnic_dev_intr_coal_timer_info_default(enic->vdev);
1476 /* Get vNIC configuration
1478 err = enic_get_vnic_config(enic);
1480 dev_err(dev, "Get vNIC configuration failed, aborting\n");
1484 /* Get available resource counts */
1485 enic_get_res_counts(enic);
1486 if (enic->conf_rq_count == 1) {
1487 dev_err(enic, "Running with only 1 RQ configured in the vNIC is not supported.\n");
1488 dev_err(enic, "Please configure 2 RQs in the vNIC for each Rx queue used by DPDK.\n");
1489 dev_err(enic, "See the ENIC PMD guide for more information.\n");
1492 /* Queue counts may be zero. rte_zmalloc returns NULL in that case. */
1493 enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
1494 enic->conf_cq_count, 8);
1495 enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
1496 enic->conf_intr_count, 8);
1497 enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
1498 enic->conf_rq_count, 8);
1499 enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
1500 enic->conf_wq_count, 8);
1501 if (enic->conf_cq_count > 0 && enic->cq == NULL) {
1502 dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
1505 if (enic->conf_intr_count > 0 && enic->intr == NULL) {
1506 dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
1509 if (enic->conf_rq_count > 0 && enic->rq == NULL) {
1510 dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
1513 if (enic->conf_wq_count > 0 && enic->wq == NULL) {
1514 dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
1518 /* Get the supported filters */
1519 enic_fdir_info(enic);
1521 eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
1522 * ENIC_MAX_MAC_ADDR, 0);
1523 if (!eth_dev->data->mac_addrs) {
1524 dev_err(enic, "mac addr storage alloc failed, aborting.\n");
1527 ether_addr_copy((struct ether_addr *) enic->mac_addr,
1528 eth_dev->data->mac_addrs);
1530 vnic_dev_set_reset_flag(enic->vdev, 0);
1532 LIST_INIT(&enic->flows);
1533 rte_spinlock_init(&enic->flows_lock);
1535 /* set up link status checking */
1536 vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
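/* Probe-time entry point: record the BAR0 mapping, register with the
 * vnic_dev layer, open the device, and run enic_dev_init().
 */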
1542 int enic_probe(struct enic *enic)
1544 struct rte_pci_device *pdev = enic->pdev;
1547 dev_debug(enic, "Initializing ENIC PMD\n");
1549 /* if this is a secondary process, the hardware is already initialized */
1550 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1553 enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
1554 enic->bar0.len = pdev->mem_resource[0].len;
1556 /* Register vNIC device */
1557 enic->vdev = vnic_dev_register(NULL, enic, enic->pdev, &enic->bar0, 1);
1559 dev_err(enic, "vNIC registration failed, aborting\n");
1563 LIST_INIT(&enic->memzone_list);
1564 rte_spinlock_init(&enic->memzone_list_lock);
1566 vnic_register_cbacks(enic->vdev,
1567 enic_alloc_consistent,
1568 enic_free_consistent);
1571 * Allocate the consistent memory for stats upfront so both primary and
1572 * secondary processes can dump stats.
1574 err = vnic_dev_alloc_stats_mem(enic->vdev);
1576 dev_err(enic, "Failed to allocate cmd memory, aborting\n");
1577 goto err_out_unregister;
1579 /* Issue device open to get the device into a known state */
1580 err = enic_dev_open(enic);
1582 dev_err(enic, "vNIC dev open failed, aborting\n");
1583 goto err_out_unregister;
1586 /* Set ingress vlan rewrite mode before vnic initialization */
1587 err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
1588 IG_VLAN_REWRITE_MODE_PASS_THRU);
1591 "Failed to set ingress vlan rewrite mode, aborting.\n");
1592 goto err_out_dev_close;
1595 /* Issue device init to initialize the vnic-to-switch link.
1596 * We'll start with carrier off and wait for link UP
1597 * notification later to turn on carrier. We don't need
1598 * to wait here for the vnic-to-switch link initialization
1599 * to complete; link UP notification is the indication that
1600 * the process is complete.
1603 err = vnic_dev_init(enic->vdev, 0);
1605 dev_err(enic, "vNIC dev init failed, aborting\n");
1606 goto err_out_dev_close;
1609 err = enic_dev_init(enic);
1611 dev_err(enic, "Device initialization failed, aborting\n");
1612 goto err_out_dev_close;
1618 vnic_dev_close(enic->vdev);
1620 vnic_dev_unregister(enic->vdev);
1625 void enic_remove(struct enic *enic)
1627 enic_dev_deinit(enic);
1628 vnic_dev_close(enic->vdev);
1629 vnic_dev_unregister(enic->vdev);