X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_kni%2Frte_kni.c;h=e388751e33cc3b4be27a076e450644dcc5181bea;hb=refs%2Fheads%2Fmempool_20200116b;hp=b8edd40f4ddb2c2df6e7f8671ead3c906c04c87a;hpb=edd2fafbc0b89bd98b23d4e34402bb79363cb70a;p=dpdk.git

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index b8edd40f4d..e388751e33 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -2,7 +2,7 @@
  * Copyright(c) 2010-2014 Intel Corporation
  */
 
-#ifndef RTE_EXEC_ENV_LINUXAPP
+#ifndef RTE_EXEC_ENV_LINUX
 #error "KNI is not supported"
 #endif
 
@@ -10,6 +10,7 @@
 #include <fcntl.h>
 #include <unistd.h>
 #include <sys/ioctl.h>
+#include <linux/version.h>
 
 #include <rte_spinlock.h>
 #include <rte_string_fns.h>
@@ -21,7 +22,7 @@
 #include <rte_tailq.h>
 #include <rte_rwlock.h>
 #include <rte_eal_memconfig.h>
-#include <exec-env/rte_kni_common.h>
+#include <rte_kni_common.h>
 #include "rte_kni_fifo.h"
 
 #define MAX_MBUF_BURST_NUM 32
@@ -59,7 +60,7 @@ struct rte_kni {
 	uint16_t group_id;                  /**< Group ID of KNI devices */
 	uint32_t slot_id;                   /**< KNI pool slot ID */
 	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
-	unsigned mbuf_size;                 /**< mbuf size */
+	unsigned int mbuf_size;                 /**< mbuf size */
 
 	const struct rte_memzone *m_tx_q;   /**< TX queue memzone */
 	const struct rte_memzone *m_rx_q;   /**< RX queue memzone */
@@ -78,7 +79,7 @@ struct rte_kni {
 	/* For request & response */
 	struct rte_kni_fifo *req_q;         /**< Request queue */
 	struct rte_kni_fifo *resp_q;        /**< Response queue */
-	void * sync_addr;                   /**< Req/Resp Mem address */
+	void *sync_addr;                   /**< Req/Resp Mem address */
 
 	struct rte_kni_ops ops;             /**< operations for request */
 };
@@ -97,6 +98,13 @@ static volatile int kni_fd = -1;
 int
 rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+	if (rte_eal_iova_mode() != RTE_IOVA_PA) {
+		RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
+		return -1;
+	}
+#endif
+
 	/* Check FD and open */
 	if (kni_fd < 0) {
 		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
@@ -214,7 +222,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		return NULL;
 	}
 
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_lock();
 
 	kni = __rte_kni_get(conf->name);
 	if (kni != NULL) {
@@ -234,7 +242,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		goto kni_fail;
 	}
 
-	snprintf(kni->name, RTE_KNI_NAMESIZE, "%s", conf->name);
+	strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);
 
 	if (ops)
 		memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
@@ -242,24 +250,17 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		kni->ops.port_id = UINT16_MAX;
 
 	memset(&dev_info, 0, sizeof(dev_info));
-	dev_info.bus = conf->addr.bus;
-	dev_info.devid = conf->addr.devid;
-	dev_info.function = conf->addr.function;
-	dev_info.vendor_id = conf->id.vendor_id;
-	dev_info.device_id = conf->id.device_id;
 	dev_info.core_id = conf->core_id;
 	dev_info.force_bind = conf->force_bind;
 	dev_info.group_id = conf->group_id;
 	dev_info.mbuf_size = conf->mbuf_size;
 	dev_info.mtu = conf->mtu;
+	dev_info.min_mtu = conf->min_mtu;
+	dev_info.max_mtu = conf->max_mtu;
 
-	memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
+	memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
 
-	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
-
-	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
-		dev_info.bus, dev_info.devid, dev_info.function,
-			dev_info.vendor_id, dev_info.device_id);
+	strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
 
 	ret = kni_reserve_mz(kni);
 	if (ret < 0)
@@ -304,6 +305,8 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	kni->group_id = conf->group_id;
 	kni->mbuf_size = conf->mbuf_size;
 
+	dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;
+
 	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
 	if (ret < 0)
 		goto ioctl_fail;
@@ -313,7 +316,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
 	TAILQ_INSERT_TAIL(kni_list, te, next);
 
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	/* Allocate mbufs and then put them into alloc_q */
 	kni_allocate_mbufs(kni);
@@ -327,7 +330,7 @@ mz_fail:
 kni_fail:
 	rte_free(te);
 unlock:
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	return NULL;
 }
@@ -353,6 +356,19 @@ va2pa(struct rte_mbuf *m)
 			 (unsigned long)m->buf_iova));
 }
 
+static void *
+va2pa_all(struct rte_mbuf *mbuf)
+{
+	void *phy_mbuf = va2pa(mbuf);
+	struct rte_mbuf *next = mbuf->next;
+	while (next) {
+		mbuf->next = va2pa(next);
+		mbuf = next;
+		next = mbuf->next;
+	}
+	return phy_mbuf;
+}
+
 static void
 obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
 		unsigned obj_idx __rte_unused)
@@ -390,7 +406,7 @@ rte_kni_release(struct rte_kni *kni)
 
 	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
 
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_lock();
 
 	TAILQ_FOREACH(te, kni_list, next) {
 		if (te->data == kni)
@@ -400,7 +416,7 @@ rte_kni_release(struct rte_kni *kni)
 	if (te == NULL)
 		goto unlock;
 
-	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
+	strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
 	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
 		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
 		goto unlock;
@@ -408,7 +424,7 @@ rte_kni_release(struct rte_kni *kni)
 
 	TAILQ_REMOVE(kni_list, te, next);
 
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	/* mbufs in all fifo should be released, except request/response */
 
@@ -432,7 +448,7 @@ rte_kni_release(struct rte_kni *kni)
 	return 0;
 
 unlock:
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	return -1;
 }
@@ -451,7 +467,7 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
 	RTE_LOG(INFO, KNI, "Configure mac address of %d", port_id);
 
 	ret = rte_eth_dev_default_mac_addr_set(port_id,
-					       (struct ether_addr *)mac_addr);
+					(struct rte_ether_addr *)mac_addr);
 	if (ret < 0)
 		RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
 			port_id);
@@ -463,6 +479,8 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
 static int
 kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
 {
+	int ret;
+
 	if (!rte_eth_dev_is_valid_port(port_id)) {
 		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
 		return -EINVAL;
@@ -472,9 +490,35 @@ kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
 		port_id, to_on);
 
 	if (to_on)
-		rte_eth_promiscuous_enable(port_id);
+		ret = rte_eth_promiscuous_enable(port_id);
 	else
-		rte_eth_promiscuous_disable(port_id);
+		ret = rte_eth_promiscuous_disable(port_id);
+
+	if (ret != 0)
+		RTE_LOG(ERR, KNI,
+			"Failed to %s promiscuous mode for port %u: %s\n",
+			to_on ? "enable" : "disable", port_id,
"enable" : "disable", port_id, + rte_strerror(-ret)); + + return ret; +} + +/* default callback for request of configuring allmulticast mode */ +static int +kni_config_allmulticast(uint16_t port_id, uint8_t to_on) +{ + if (!rte_eth_dev_is_valid_port(port_id)) { + RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id); + return -EINVAL; + } + + RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n", + port_id, to_on); + + if (to_on) + rte_eth_allmulticast_enable(port_id); + else + rte_eth_allmulticast_disable(port_id); return 0; } @@ -482,8 +526,8 @@ kni_config_promiscusity(uint16_t port_id, uint8_t to_on) int rte_kni_handle_request(struct rte_kni *kni) { - unsigned ret; - struct rte_kni_request *req; + unsigned int ret; + struct rte_kni_request *req = NULL; if (kni == NULL) return -1; @@ -507,8 +551,8 @@ rte_kni_handle_request(struct rte_kni *kni) break; case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */ if (kni->ops.config_network_if) - req->result = kni->ops.config_network_if(\ - kni->ops.port_id, req->if_up); + req->result = kni->ops.config_network_if(kni->ops.port_id, + req->if_up); break; case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */ if (kni->ops.config_mac_address) @@ -526,6 +570,14 @@ rte_kni_handle_request(struct rte_kni *kni) req->result = kni_config_promiscusity( kni->ops.port_id, req->promiscusity); break; + case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */ + if (kni->ops.config_allmulticast) + req->result = kni->ops.config_allmulticast( + kni->ops.port_id, req->allmulti); + else if (kni->ops.port_id != UINT16_MAX) + req->result = kni_config_allmulticast( + kni->ops.port_id, req->allmulti); + break; default: RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id); req->result = -EINVAL; @@ -543,14 +595,15 @@ rte_kni_handle_request(struct rte_kni *kni) } unsigned -rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) +rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) { + num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num); void *phy_mbufs[num]; unsigned int ret; unsigned int i; for (i = 0; i < num; i++) - phy_mbufs[i] = va2pa(mbufs[i]); + phy_mbufs[i] = va2pa_all(mbufs[i]); ret = kni_fifo_put(kni->rx_q, phy_mbufs, num); @@ -561,9 +614,9 @@ rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) } unsigned -rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num) +rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num) { - unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); + unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num); /* If buffers removed, allocate mbufs and then put them into alloc_q */ if (ret) @@ -614,7 +667,7 @@ kni_allocate_mbufs(struct rte_kni *kni) return; } - allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \ + allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) & (MAX_MBUF_BURST_NUM - 1); for (i = 0; i < allocq_free; i++) { pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool); @@ -649,11 +702,11 @@ rte_kni_get(const char *name) if (name == NULL || name[0] == '\0') return NULL; - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_lock(); kni = __rte_kni_get(name); - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_unlock(); return kni; } @@ -668,35 +721,36 @@ static enum kni_ops_status kni_check_request_register(struct rte_kni_ops *ops) { /* check if KNI request ops has been registered*/ - if( NULL == ops ) + if (ops == NULL) 
 		return KNI_REQ_NO_REGISTER;
 
-	if ((ops->change_mtu == NULL)
-	    && (ops->config_network_if == NULL)
-	    && (ops->config_mac_address == NULL)
-	    && (ops->config_promiscusity == NULL))
+	if (ops->change_mtu == NULL
+	    && ops->config_network_if == NULL
+	    && ops->config_mac_address == NULL
+	    && ops->config_promiscusity == NULL
+	    && ops->config_allmulticast == NULL)
 		return KNI_REQ_NO_REGISTER;
 
 	return KNI_REQ_REGISTERED;
 }
 
 int
-rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
+rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
 {
 	enum kni_ops_status req_status;
 
-	if (NULL == ops) {
+	if (ops == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
 		return -1;
 	}
 
-	if (NULL == kni) {
+	if (kni == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
 		return -1;
 	}
 
 	req_status = kni_check_request_register(&kni->ops);
-	if ( KNI_REQ_REGISTERED == req_status) {
+	if (req_status == KNI_REQ_REGISTERED) {
 		RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n");
 		return -1;
 	}
@@ -708,7 +762,7 @@ rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
 int
 rte_kni_unregister_handlers(struct rte_kni *kni)
 {
-	if (NULL == kni) {
+	if (kni == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
 		return -1;
 	}
@@ -717,6 +771,47 @@ rte_kni_unregister_handlers(struct rte_kni *kni)
 	return 0;
 }
 
+
+int
+rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
+{
+	char path[64];
+	char old_carrier[2];
+	const char *new_carrier;
+	int old_linkup;
+	int fd, ret;
+
+	if (kni == NULL)
+		return -1;
+
+	snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier",
+		kni->name);
+
+	fd = open(path, O_RDWR);
+	if (fd == -1) {
+		RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path);
+		return -1;
+	}
+
+	ret = read(fd, old_carrier, 2);
+	if (ret < 1) {
+		close(fd);
+		return -1;
+	}
+	old_linkup = (old_carrier[0] == '1');
+
+	new_carrier = linkup ? "1" : "0";
+	ret = write(fd, new_carrier, 1);
+	if (ret < 1) {
+		RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path);
+		close(fd);
+		return -1;
+	}
+
+	close(fd);
+	return old_linkup;
+}
+
 void
 rte_kni_close(void)
 {
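
Usage sketch (not part of the patch above). It shows how an application might exercise the two user-visible interfaces this diff introduces: the config_allmulticast callback slot in struct rte_kni_ops, dispatched from rte_kni_handle_request() on an RTE_KNI_REQ_CHANGE_ALLMULTI request, and rte_kni_update_link(), which per the hunk above returns the previous carrier state (0 or 1) or -1 on error. The port id and the handler body are hypothetical application code; a KNI context created elsewhere with rte_kni_alloc() is assumed.

/* Hedged usage sketch -- hypothetical application code, not library code. */
#include <rte_kni.h>
#include <rte_log.h>

static int
app_config_allmulticast(uint16_t port_id, uint8_t to_on)
{
	/* Invoked when the kernel side requests an allmulticast change;
	 * the request arrives on req_q and is dispatched by
	 * rte_kni_handle_request(). */
	RTE_LOG(INFO, KNI, "allmulticast of port %u -> %u\n", port_id, to_on);
	return 0;
}

static void
app_kni_example(struct rte_kni *kni, uint16_t port_id)
{
	struct rte_kni_ops ops = {
		.port_id = port_id,
		/* New callback added by this patch; if left NULL, the
		 * library falls back to kni_config_allmulticast(). */
		.config_allmulticast = app_config_allmulticast,
	};
	int prev;

	/* Fails if handlers were already registered via rte_kni_alloc(). */
	if (rte_kni_register_handlers(kni, &ops) < 0)
		RTE_LOG(ERR, KNI, "handler registration failed\n");

	/* rte_kni_update_link() writes "1"/"0" to the interface's sysfs
	 * carrier file and returns the previous state, so the caller can
	 * restore it later. */
	prev = rte_kni_update_link(kni, 1);
	if (prev < 0)
		RTE_LOG(ERR, KNI, "link update failed\n");
	else if (prev == 0)
		RTE_LOG(INFO, KNI, "carrier was down, now up\n");
}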